* [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
@ 2010-06-16 15:04 Kathy Hadley
  2010-06-16 15:50 ` George Dunlap
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-16 15:04 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Keir Fraser


[-- Attachment #1.1: Type: text/plain, Size: 36687 bytes --]

This patch adds an ARINC 653 scheduler to Xen.  This is a modification
of an earlier patch (submitted Friday, April 16).  In particular, it has
been modified to use the new .adjust_global callback function (added by
Keir Fraser in c/s 21282) and to support CPU pools (per feedback
received from George Dunlap on Tuesday, May 4).
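
For reference, below is a minimal sketch (not part of the patch) of how a
toolstack might drive the new xc_sched_arinc653_sched_set() interface to
install a two-entry schedule.  The guest UUID and the pre-4.1 libxc style
of xc_interface_open() returning an int handle are illustrative
assumptions only:

    /*
     * Hypothetical toolstack snippet: a 20 ms major frame giving Dom0
     * and one guest VCPU 10 ms each.  The guest handle below is a
     * made-up placeholder; a real toolstack would use the real UUID.
     */
    #include <string.h>
    #include <stdio.h>
    #include "xenctrl.h"

    int set_example_schedule(void)
    {
        xen_sysctl_sched_arinc653_schedule_t sched;
        xen_domain_handle_t guest_handle = { 0xde, 0xad, 0xbe, 0xef };
        int xc_handle, rc;

        memset(&sched, 0, sizeof(sched));
        sched.major_frame = 20000000LL;        /* 20 ms, in nanoseconds */
        sched.num_sched_entries = 2;

        /* Entry 0: Dom0 (all-zero handle, already zeroed), VCPU 0, 10 ms. */
        sched.sched_entries[0].vcpu_id = 0;
        sched.sched_entries[0].runtime = 10000000LL;

        /* Entry 1: the guest domain, VCPU 0, 10 ms per major frame. */
        memcpy(sched.sched_entries[1].dom_handle, guest_handle,
               sizeof(xen_domain_handle_t));
        sched.sched_entries[1].vcpu_id = 0;
        sched.sched_entries[1].runtime = 10000000LL;

        /* Assumes the pre-4.1 libxc style: an int hypervisor handle. */
        xc_handle = xc_interface_open();
        if ( xc_handle < 0 )
            return -1;

        rc = xc_sched_arinc653_sched_set(xc_handle, &sched);
        if ( rc != 0 )
            fprintf(stderr, "schedule rejected by Xen: %d\n", rc);

        xc_interface_close(xc_handle);
        return rc;
    }

The hypervisor rejects a schedule that omits Dom0, contains a
non-positive run time, or whose total run time exceeds the major frame,
returning -EINVAL in each case.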

 

Thanks and regards,

  Kathy Hadley

  DornerWorks, Ltd.

 

 

[-- Attachment #1.2: Type: text/html, Size: 144554 bytes --]

[-- Attachment #2: dornerworks-xen-4.1-arinc653-scheduler.patch --]
[-- Type: application/octet-stream, Size: 33470 bytes --]

diff -rupN a/tools/libxc/Makefile b/tools/libxc/Makefile
--- a/tools/libxc/Makefile	2010-05-26 17:01:34.000000000 -0400
+++ b/tools/libxc/Makefile	2010-06-01 12:30:45.000000000 -0400
@@ -19,6 +19,7 @@ CTRL_SRCS-y       += xc_private.c
 CTRL_SRCS-y       += xc_sedf.c
 CTRL_SRCS-y       += xc_csched.c
 CTRL_SRCS-y       += xc_csched2.c
+CTRL_SRCS-y       += xc_arinc653.c
 CTRL_SRCS-y       += xc_tbuf.c
 CTRL_SRCS-y       += xc_pm.c
 CTRL_SRCS-y       += xc_cpu_hotplug.c
diff -rupN a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c
--- a/tools/libxc/xc_arinc653.c	1969-12-31 19:00:00.000000000 -0500
+++ b/tools/libxc/xc_arinc653.c	2010-06-14 10:50:57.000000000 -0400
@@ -0,0 +1,27 @@
+/****************************************************************************
+ * (C) 2010 - DornerWorks, Ltd <DornerWorks.com>
+ ****************************************************************************
+ *
+ *        File: xc_arinc653.c
+ *      Author: Josh Holtrop <DornerWorks.com>
+ *
+ * Description: XC Interface to the ARINC 653 scheduler
+ *
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_arinc653_sched_set(
+    int xc_handle,
+    xen_sysctl_sched_arinc653_schedule_t * sched)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
+    set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);
+
+    return do_sysctl(xc_handle, &sysctl);
+}
diff -rupN a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	2010-05-26 17:01:34.000000000 -0400
+++ b/tools/libxc/xenctrl.h	2010-06-01 12:30:45.000000000 -0400
@@ -485,6 +485,16 @@ int xc_sched_credit2_domain_get(int xc_h
                                struct xen_domctl_sched_credit2 *sdom);
 
 /**
+ * This function sets the global ARINC 653 schedule.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm sched a pointer to the new ARINC 653 schedule
+ * return 0 on success
+ */
+int xc_sched_arinc653_sched_set(int xc_handle,
+                                xen_sysctl_sched_arinc653_schedule_t * sched);
+
+/**
  * This function sends a trigger to a domain.
  *
  * @parm xc_handle a handle to an open hypervisor interface
diff -rupN a/xen/common/Makefile b/xen/common/Makefile
--- a/xen/common/Makefile	2010-05-26 17:01:34.000000000 -0400
+++ b/xen/common/Makefile	2010-06-01 12:30:45.000000000 -0400
@@ -17,6 +17,7 @@ obj-y += rangeset.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
 obj-y += sched_sedf.o
+obj-y += sched_arinc653.o
 obj-y += schedule.o
 obj-y += shutdown.o
 obj-y += softirq.o
diff -rupN a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
--- a/xen/common/sched_arinc653.c	1969-12-31 19:00:00.000000000 -0500
+++ b/xen/common/sched_arinc653.c	2010-06-16 09:05:24.000000000 -0400
@@ -0,0 +1,806 @@
+/*
+ * File: sched_arinc653.c
+ * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
+ *
+ * Description:
+ *   This file provides an ARINC 653-compatible scheduling algorithm
+ *   for use in Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ */
+
+
+/**************************************************************************
+ * Includes                                                               *
+ *************************************************************************/
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
+#include <xen/timer.h>
+#include <xen/softirq.h>
+#include <xen/time.h>
+#include <xen/errno.h>
+#include <xen/list.h>
+#include <public/sysctl.h>          /* ARINC653_MAX_DOMAINS_PER_SCHEDULE */
+#include <xen/guest_access.h>
+
+
+/**************************************************************************
+ * Private Macros                                                         *
+ **************************************************************************/
+
+/**
+ * Retrieve the idle VCPU for a given physical CPU
+ */
+#define IDLETASK(cpu)  (idle_vcpu[cpu])
+
+/**
+ * Return a pointer to the ARINC 653-specific scheduler data information
+ * associated with the given VCPU (vc)
+ */
+#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)
+
+
+/**************************************************************************
+ * Private Type Definitions                                               *
+ **************************************************************************/
+
+/**
+ * The sched_entry_t structure holds a single entry of the
+ * ARINC 653 schedule.
+ */
+typedef struct sched_entry_s
+{
+    /* dom_handle holds the handle ("UUID") for the domain that this
+     * schedule entry refers to. */
+    xen_domain_handle_t dom_handle;
+    /* vcpu_id holds the VCPU number for the VCPU that this schedule
+     * entry refers to. */
+    int                 vcpu_id;
+    /* runtime holds the number of nanoseconds that the VCPU for this
+     * schedule entry should be allowed to run per major frame. */
+    s_time_t            runtime;
+    /* vc holds a pointer to the Xen VCPU structure */
+    struct vcpu *       vc;
+} sched_entry_t;
+
+/**
+ * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
+ * information for all non-idle VCPUs
+ */
+typedef struct arinc653_vcpu_s
+{
+    /* vc points to Xen's struct vcpu so we can get to it from an
+     * arinc653_vcpu_t pointer. */
+    struct vcpu *       vc;
+    /* awake holds whether the VCPU has been woken with vcpu_wake() */
+    bool_t              awake;
+    /* list holds the linked list information for the list this VCPU
+     * is stored in */
+    struct list_head    list;
+} arinc653_vcpu_t;
+
+/**
+ * Data structure containing domain-specific information.
+ */
+struct arinc653_dom_info {
+    struct domain  *domain;
+};
+
+/**
+ * Data structure containing all the "global" data items used by the scheduler.
+ */
+typedef struct arinc653_sched_private_s
+{
+    /*
+     * This array holds the active ARINC 653 schedule.
+     *
+     * When the system tries to start a new VCPU, this schedule is scanned
+     * to look for a matching (handle, VCPU #) pair. If both the handle ("UUID")
+     * and VCPU number match, then the VCPU is allowed to run. Its run time
+     * (per major frame) is given in the third entry of the schedule.
+     */
+    sched_entry_t arinc653_schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+    /*
+     * This variable holds the number of entries that are valid in
+     * the arinc653_schedule table.
+     *
+     * This is not necessarily the same as the number of domains in the
+     * schedule. A domain could be listed multiple times within the schedule,
+     * or a domain with multiple VCPUs could have a different
+     * schedule entry for each VCPU.
+     *
+     * A value of 1 means that only 1 domain (Dom0) will initially be started.
+     */
+    int num_schedule_entries;
+    /*
+     * arinc653_major_frame holds the major frame time for the ARINC 653
+     * schedule.
+     */
+    s_time_t arinc653_major_frame;
+    /*
+     * next_major_frame holds the time that the next major frame starts
+     */
+    s_time_t next_major_frame;
+    /*
+     * vcpu_list holds pointers to all Xen VCPU structures for iterating through
+     */
+    struct list_head vcpu_list;
+} arinc653_sched_private_t;
+
+
+/**************************************************************************
+ * Global data                                                            *
+ **************************************************************************/
+static arinc653_sched_private_t arinc653_schedule;
+
+
+/**************************************************************************
+ * Scheduler functions                                                    *
+ **************************************************************************/
+
+/**
+ * This function compares two domain handles.
+ *
+ * @param h1        Pointer to handle 1
+ * @param h2        Pointer to handle 2
+ *
+ * @return          <ul>
+ *                  <li> <0:  handle 1 is less than handle 2
+ *                  <li>  0:  handle 1 is equal to handle 2
+ *                  <li> >0:  handle 1 is greater than handle 2
+ *                  </ul>
+ */
+static int dom_handle_cmp(const xen_domain_handle_t h1,
+                          const xen_domain_handle_t h2)
+{
+    return memcmp(h1, h2, sizeof(xen_domain_handle_t));
+} /* end dom_handle_cmp */
+
+/**
+ * This function searches the vcpu list to find a VCPU that matches
+ * the domain handle and VCPU ID specified.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param handle    Pointer to handle
+ * @param vcpu_id   VCPU ID
+ *
+ * @return          <ul>
+ *                  <li> Pointer to the matching VCPU if one is found
+ *                  <li> NULL otherwise
+ *                  </ul>
+ */
+static struct vcpu * find_vcpu(const struct scheduler *ops,
+                               xen_domain_handle_t handle,
+                               int vcpu_id)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+    arinc653_vcpu_t * avcpu; /* loop index variable */
+    struct vcpu * vc = NULL;
+
+    /* loop through the vcpu_list looking for the specified VCPU */
+    list_for_each_entry(avcpu, &prv->vcpu_list, list)
+    {
+        /* If the handles & VCPU IDs match, we've found a matching VCPU */
+        if ((dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
+             && (vcpu_id == avcpu->vc->vcpu_id))
+        {
+            vc = avcpu->vc;
+            /*
+             * "break" statement used instead of loop control variable because
+             * the macro used for this loop does not support using loop control
+             * variables
+             */
+            break;
+        }
+    }
+
+    return vc;
+} /* end find_vcpu */
+
+/**
+ * This function updates the pointer to the Xen VCPU structure for each entry in
+ * the ARINC 653 schedule.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ *
+ * @return <None>
+ */
+static void update_schedule_vcpus(const struct scheduler *ops)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    /* Loop through the number of entries in the schedule */
+    for (int i = 0; i < prv->num_schedule_entries; i++)
+    {
+        /* Update the pointer to the Xen VCPU structure for the current entry */
+        prv->arinc653_schedule[i].vc =
+            find_vcpu(ops,
+                      prv->arinc653_schedule[i].dom_handle,
+                      prv->arinc653_schedule[i].vcpu_id);
+    }
+} /* end update_schedule_vcpus */
+
+/**
+ * This function is called by the arinc653_adjust_global scheduler
+ * callback function in response to a domain control hypercall with
+ * a scheduler operation.
+ *
+ * The parameter schedule is set to be the address of a local variable from
+ * within arinc653_adjust_global(), so it is guaranteed to not be NULL.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param schedule  Pointer to the new ARINC 653 schedule.
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_sched_set(const struct scheduler *ops,
+                              xen_sysctl_sched_arinc653_schedule_t * schedule)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    int ret = 0;
+    s_time_t total_runtime = 0;
+    bool_t found_dom0 = 0;
+    const static xen_domain_handle_t dom0_handle = {0};
+
+    /* check for valid major frame and number of schedule entries */
+    if ( (schedule->major_frame <= 0)
+      || (schedule->num_sched_entries < 1)
+      || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
+    {
+        ret = -EINVAL;
+    }
+
+    if (ret == 0)
+    {
+        for (int i = 0; i < schedule->num_sched_entries; i++)
+        {
+            /*
+             * look for domain 0 handle - every schedule must contain
+             * some time for domain 0 to run
+             */
+            if (dom_handle_cmp(schedule->sched_entries[i].dom_handle,
+                               dom0_handle) == 0)
+            {
+                found_dom0 = 1;
+            }
+
+            /* check for a valid VCPU ID and run time */
+            if ( (schedule->sched_entries[i].vcpu_id < 0)
+              || (schedule->sched_entries[i].runtime <= 0) )
+            {
+                ret = -EINVAL;
+            }
+            else
+            {
+                /* Add this entry's run time to total run time */
+                total_runtime += schedule->sched_entries[i].runtime;
+            }
+        } /* end loop through schedule entries */
+    }
+
+    if (ret == 0)
+    {
+        /* error if the schedule doesn't contain a slot for domain 0 */
+        if (found_dom0 == 0)
+        {
+            ret = -EINVAL;
+        }
+    }
+
+    if (ret == 0)
+    {
+        /*
+         * error if the major frame is not large enough to run all entries
+         * as indicated by comparing the total run time to the major frame
+         * length
+         */
+        if (total_runtime > schedule->major_frame)
+        {
+            ret = -EINVAL;
+        }
+    }
+
+    if (ret == 0)
+    {
+        /* copy the new schedule into place */
+        prv->num_schedule_entries = schedule->num_sched_entries;
+        prv->arinc653_major_frame = schedule->major_frame;
+        for (int i = 0; i < prv->num_schedule_entries; i++)
+        {
+            memcpy(prv->arinc653_schedule[i].dom_handle,
+                   schedule->sched_entries[i].dom_handle,
+                   sizeof(prv->arinc653_schedule[i].dom_handle));
+            prv->arinc653_schedule[i].vcpu_id =
+                schedule->sched_entries[i].vcpu_id;
+            prv->arinc653_schedule[i].runtime =
+                schedule->sched_entries[i].runtime;
+        }
+        update_schedule_vcpus(ops);
+
+        /*
+         * The newly-installed schedule takes effect immediately.
+         * We do not even wait for the current major frame to expire.
+         *
+         * Signal a new major frame to begin. The next major frame
+         * is set up by the do_schedule callback function when it
+         * is next invoked.
+         */
+        prv->next_major_frame = NOW();
+    }
+
+    return ret;
+} /* end arinc653_sched_set */
+
+/**
+ * Xen scheduler callback function to adjust global scheduling parameters
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param op        Pointer to the system control scheduler operation structure
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_adjust_global(const struct scheduler *ops,
+                                  struct xen_sysctl_scheduler_op * op)
+{
+    int ret = -1;
+    xen_sysctl_sched_arinc653_schedule_t new_sched;
+
+    if (op->cmd == XEN_SYSCTL_SCHEDOP_putinfo)
+    {
+        if (copy_from_guest(&new_sched, op->u.arinc653.schedule, 1) != 0)
+        {
+            ret = -EFAULT;
+        }
+        else
+        {
+            ret = arinc653_sched_set(ops, &new_sched);
+        }
+    }
+
+    return ret;
+} /* end arinc653_adjust_global */
+
+/**
+ * Xen scheduler callback function to allocate and initialize the ARINC 653
+ * scheduler data structure.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param pool0     Flag indicating whether pool 0 or other being allocated
+ *                  [Not Used]
+ *
+ * @return          <ul>
+ *                  <li> 0 = success
+ *                  <li> !0 = error
+ *                  </ul>
+ */
+static int arinc653_init(struct scheduler *ops)
+{
+    arinc653_sched_private_t *prv;
+    int i;
+
+    /* Initial value for the ARINC 653 scheduler data. */
+    const sched_entry_t init_sched_element = { "", 0, MILLISECS(10), NULL };
+
+    /* Point at the statically allocated ARINC 653 scheduler data structure */
+    prv = &arinc653_schedule;
+    if ( prv == NULL )
+    {
+        return -ENOMEM;
+    }
+
+    /* Initialize the ARINC 653 scheduler data structure */
+    memset(prv, 0, sizeof(*prv));
+
+    /* Set the "scheduler" structure to point to the ARINC 653 scheduler data */
+    ops->sched_data = prv;
+
+    /*
+     * Initialize the ARINC 653 scheduler data.  In particular:
+     *   All domains execute for 10 ms.
+     *   Only one domain is enabled (domain 0).
+     *   Major frame = 10 ms (time required for domain 0).
+     */
+    for (i=0; i<(sizeof(prv->arinc653_schedule)/sizeof(sched_entry_t)); i++)
+    {
+        prv->arinc653_schedule[i] = init_sched_element;
+    }
+    prv->num_schedule_entries = 1;
+    prv->arinc653_major_frame = MILLISECS(10);
+    prv->next_major_frame = 0;
+    INIT_LIST_HEAD(&prv->vcpu_list);
+
+    return 0;
+} /* end arinc653_init */
+
+/**
+ * Xen scheduler callback function to allocate and initialize a data structure
+ * containing information for a VCPU.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure
+ * @param dd        Domain data [Not Used]
+ *
+ * @return          <ul>
+ *                  <li> address of the allocated data structure
+ *                  <li> NULL if error
+ *                  </ul>
+ */
+static void *arinc653_alloc_vdata(const struct scheduler *ops,
+                                  struct vcpu *v,
+                                  void *dd)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+    arinc653_vcpu_t *inf;
+
+    /*
+     * Allocate memory for the ARINC 653-specific scheduler data information
+     * associated with the given VCPU (vc).
+     */
+    inf = xmalloc(arinc653_vcpu_t);
+    if (inf != NULL)
+    {
+        /*
+         * Initialize our ARINC 653 scheduler-specific information
+         * for the VCPU.
+         * The VCPU starts "asleep."
+         * When Xen is ready for the VCPU to run, it will call
+         * the vcpu_wake scheduler callback function and our
+         * scheduler will mark the VCPU awake.
+         */
+        inf->vc = v;
+        inf->awake = 0;
+        list_add(&inf->list, &prv->vcpu_list);
+        update_schedule_vcpus(ops);
+    }
+
+    return inf;
+} /* end arinc653_alloc_vdata */
+
+/**
+ * Xen scheduler callback function to free up VCPU data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param priv      Pointer to the VCPU structure
+ *
+ * @return          <None>
+ */
+static void arinc653_free_vdata(const struct scheduler *ops, void *priv)
+{
+    /* Free the arinc653_vcpu structure */
+    xfree(AVCPU((struct vcpu *)priv));
+} /* end arinc653_free_vdata */
+
+/**
+ * Xen scheduler callback function to allocate and initialize a data structure
+ * containing domain-specific data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param dom       Pointer to data structure with data for the current domain
+ *                  [Not Used]
+ *
+ * @return void*    <ul>
+ *                  <li> address of the allocated data structure
+ *                  </ul>
+ */
+static void *
+arinc653_alloc_domdata(const struct scheduler *ops, struct domain *dom)
+{
+    void *mem;
+
+    /* Allocate memory for the domain-specific data structure */
+    mem = xmalloc(struct arinc653_dom_info);
+    if ( mem == NULL )
+    {
+        return NULL;
+    }
+
+    /* Initialize the allocated memory */
+    memset(mem, 0, sizeof(struct arinc653_dom_info));
+
+    return mem;
+} /* end arinc653_alloc_domdata */
+
+/**
+ * Xen scheduler callback function to free up domain-specific data.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param data      Pointer to the domain-specific data structure
+ *
+ * @return          <None>
+ */
+static void arinc653_free_domdata(const struct scheduler *ops, void *data)
+{
+    /* free the domain-specific data structure */
+    xfree(data);
+} /* end arinc653_free_domdata */
+
+/**
+ * Xen scheduler callback function to initialize a domain.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param dom       Pointer to domain-specific data structure
+ *
+ * @return int      <ul>
+ *                  <li> 0 for success
+ *                  <li> -ENOMEM if out of memory
+ *                  </ul>
+ */
+static int arinc653_init_domain(const struct scheduler *ops,
+                                struct domain *dom)
+{
+    /* Don't allocate a data structure for an idle domain */
+    if ( is_idle_domain(dom) )
+    {
+        return 0;
+    }
+
+    /* Allocate memory for the domain. */
+    dom->sched_priv = arinc653_alloc_domdata(ops, dom);
+    if ( dom->sched_priv == NULL )
+    {
+        return -ENOMEM;
+    }
+
+    return 0;
+} /* end arinc653_init_domain */
+
+/**
+ * Xen scheduler callback function to destroy a domain.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param dom       Pointer to domain-specific data structure
+ */
+static void arinc653_destroy_domain(const struct scheduler *ops,
+                                    struct domain *dom)
+{
+    arinc653_free_domdata(ops, dom->sched_priv);
+} /* end arinc653_destroy_domain */
+
+/**
+ * Xen scheduler callback function to remove a VCPU.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure to remove
+ *
+ * @return          <None>
+ */
+static void arinc653_destroy_vcpu(const struct scheduler *ops, struct vcpu * v)
+{
+    if (AVCPU(v) != NULL)
+    {
+        /* remove the VCPU from whichever list it is on */
+        list_del(&AVCPU(v)->list);
+        /* free the arinc653_vcpu structure */
+        arinc653_free_vdata(ops, v);
+        update_schedule_vcpus(ops);
+    }
+} /* end arinc653_destroy_vcpu */
+
+/**
+ * Xen scheduler callback function to select a VCPU to run.
+ * This is the main scheduler routine.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param t         Current time
+ * @param tasklet_work_scheduled
+ *                  [Not Used]
+ *
+ * @return          Time slice and address of the VCPU structure for the chosen
+ *                  domain
+ */
+static struct task_slice arinc653_do_schedule(const struct scheduler *ops,
+                                              s_time_t t,
+                                              bool_t tasklet_work_scheduled)
+{
+    arinc653_sched_private_t *prv = ops->sched_data;
+
+    struct task_slice ret;                      /* hold the chosen domain */
+    struct vcpu * new_task = NULL;
+    static int sched_index = 0;
+    static s_time_t last_major_frame;
+    static s_time_t last_switch_time;
+    static s_time_t next_switch_time;
+
+    if (t >= prv->next_major_frame)
+    {
+        /* time to enter a new major frame
+         * the first time this function is called, this will be true */
+        sched_index = 0;
+        last_major_frame = last_switch_time = t;
+        prv->next_major_frame = t + prv->arinc653_major_frame;
+    }
+    else if (t >= next_switch_time)
+    {
+        /* time to switch to the next domain in this major frame */
+        sched_index++;
+        last_switch_time = next_switch_time;
+    }
+
+    /*
+     * If there are more domains to run in the current major frame, set
+     * next_switch_time equal to the last switch time + this domain's run time.
+     * Otherwise, set next_switch_time equal to the start of the next major
+     * frame.
+     */
+    next_switch_time = (sched_index < prv->num_schedule_entries)
+        ? last_switch_time + prv->arinc653_schedule[sched_index].runtime
+        : prv->next_major_frame;
+
+    /*
+     * If there are more domains to run in the current major frame, set
+     * new_task equal to the address of next domain's VCPU structure.
+     * Otherwise, set new_task equal to the address of the idle task's VCPU
+     * structure.
+     */
+    new_task = (sched_index < prv->num_schedule_entries)
+                   ? prv->arinc653_schedule[sched_index].vc
+                   : IDLETASK(0);
+
+    /* Check to see if the new task can be run (awake & runnable). */
+    if (!((new_task != NULL)
+            && AVCPU(new_task)->awake
+            && vcpu_runnable(new_task)) )
+    {
+        new_task = IDLETASK(0);
+    }
+    BUG_ON(new_task == NULL);
+
+    /*
+     * Check to make sure we did not miss a major frame.
+     * This is a good test for robust partitioning.
+     */
+    BUG_ON(t >= prv->next_major_frame);
+
+    /*
+     * Return the amount of time the next domain has to run and the address
+     * of the selected task's VCPU structure.
+     */
+    ret.time = next_switch_time - t;
+    ret.task = new_task;
+
+    BUG_ON(ret.time <= 0);
+
+    return ret;
+} /* end arinc653_do_schedule */
+
+/**
+ * Xen scheduler callback function to select a CPU for the VCPU to run on.
+ * Currently only one CPU is supported.
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler
+ * @param v         Pointer to the VCPU structure for the current domain
+ *
+ * @return          Number of selected physical CPU
+ */
+static int arinc653_pick_cpu(const struct scheduler *ops, struct vcpu *v)
+{
+    /* this implementation only supports one physical CPU */
+    return 0;
+} /* end arinc653_pick_cpu */
+
+/**
+ * Xen scheduler callback function to wake up a VCPU
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param vc        Pointer to the VCPU structure for the current domain
+ *
+ * @return          <None>
+ */
+static void arinc653_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
+{
+    /* boolean flag to indicate first run */
+    static bool_t dont_raise_softirq = 0;
+
+    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
+    {
+        AVCPU(vc)->awake = 1;
+    }
+
+    /* the first time the vcpu_wake function is called, we should raise
+     * a softirq to invoke the do_scheduler callback */
+    if (!dont_raise_softirq)
+    {
+        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+        dont_raise_softirq = 1;
+    }
+} /* end arinc653_vcpu_wake */
+
+/**
+ * Xen scheduler callback function to sleep a VCPU
+ *
+ * @param ops       Pointer to data structure with data & functions for the
+ *                  current scheduler [Not Used]
+ * @param vc        Pointer to the VCPU structure for the current domain
+ *
+ * @return          <None>
+ */
+static void arinc653_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
+{
+    if (AVCPU(vc) != NULL)  /* check that this is a VCPU we are tracking */
+    {
+        AVCPU(vc)->awake = 0;
+    }
+
+    /* if the VCPU being put to sleep is the same one that is currently
+     * running, raise a softirq to invoke the scheduler to switch domains */
+    if (per_cpu(schedule_data, vc->processor).curr == vc)
+    {
+        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
+    }
+} /* end arinc653_vcpu_sleep */
+
+/**
+ * This structure defines our scheduler for Xen.
+ * The entries tell Xen where to find our scheduler-specific
+ * callback functions.
+ * The symbol must be visible to the rest of Xen at link time.
+ */
+struct scheduler sched_arinc653_def = {
+    .name           = "ARINC 653 Scheduler",
+    .opt_name       = "arinc653",
+    .sched_id       = XEN_SCHEDULER_ARINC653,
+    .sched_data     = &arinc653_schedule,
+
+    .init           = arinc653_init,
+/*  .deinit         = NULL, */
+
+    .free_vdata     = arinc653_free_vdata,
+    .alloc_vdata    = arinc653_alloc_vdata,
+
+/*  .free_pdata     = NULL, */
+/*  .alloc_pdata    = NULL, */
+    .free_domdata   = arinc653_free_domdata,
+    .alloc_domdata  = arinc653_alloc_domdata,
+
+    .init_domain    = arinc653_init_domain,
+    .destroy_domain = arinc653_destroy_domain,
+
+/*  .insert_vcpu    = NULL, */
+    .destroy_vcpu   = arinc653_destroy_vcpu,
+
+    .sleep          = arinc653_vcpu_sleep,
+    .wake           = arinc653_vcpu_wake,
+/*  .context_saved  = NULL, */
+
+    .do_schedule    = arinc653_do_schedule,
+
+    .pick_cpu       = arinc653_pick_cpu,
+/*  .adjust         = NULL, */
+    .adjust_global  = arinc653_adjust_global,
+/*  .dump_settings  = NULL, */
+/*  .dump_cpu_state = NULL, */
+
+/*  .tick_suspend   = NULL, */
+/*  .tick_resume    = NULL, */
+};
diff -rupN a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c	2010-05-26 17:01:34.000000000 -0400
+++ b/xen/common/schedule.c	2010-06-01 12:30:45.000000000 -0400
@@ -8,6 +8,8 @@
  *      Author: Rolf Neugebauer & Keir Fraser
  *              Updated for generic API by Mark Williamson
  * 
+ *              ARINC 653 scheduler added by DornerWorks <DornerWorks.com>
+ * 
  * Description: Generic CPU scheduling code
  *              implements support functionality for the Xen scheduler API.
  *
@@ -59,10 +61,12 @@ DEFINE_PER_CPU(struct scheduler *, sched
 extern const struct scheduler sched_sedf_def;
 extern const struct scheduler sched_credit_def;
 extern const struct scheduler sched_credit2_def;
+extern const struct scheduler sched_arinc653_def;
 static const struct scheduler *schedulers[] = {
     &sched_sedf_def,
     &sched_credit_def,
     &sched_credit2_def,
+    &sched_arinc653_def,
     NULL
 };
 
diff -rupN a/xen/include/public/domctl.h b/xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	2010-05-26 17:01:34.000000000 -0400
+++ b/xen/include/public/domctl.h	2010-06-01 12:30:45.000000000 -0400
@@ -305,6 +305,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
 #define XEN_SCHEDULER_SEDF     4
 #define XEN_SCHEDULER_CREDIT   5
 #define XEN_SCHEDULER_CREDIT2  6
+#define XEN_SCHEDULER_ARINC653 7
 /* Set or get info? */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
diff -rupN a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h	2010-05-26 17:01:34.000000000 -0400
+++ b/xen/include/public/sysctl.h	2010-06-01 12:30:45.000000000 -0400
@@ -22,6 +22,8 @@
  * DEALINGS IN THE SOFTWARE.
  *
  * Copyright (c) 2002-2006, K Fraser
+ *
+ * ARINC 653 Scheduler type added by DornerWorks <DornerWorks.com>.
  */
 
 #ifndef __XEN_PUBLIC_SYSCTL_H__
@@ -539,10 +541,43 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupo
 /* Set or get info? */
 #define XEN_SYSCTL_SCHEDOP_putinfo 0
 #define XEN_SYSCTL_SCHEDOP_getinfo 1
+
+/*
+ * This structure is used to pass a new ARINC 653 schedule from a
+ * privileged domain (i.e. Dom0) to Xen.
+ */
+#define ARINC653_MAX_DOMAINS_PER_SCHEDULE   64
+struct xen_sysctl_sched_arinc653_schedule {
+    /* major_frame holds the time for the new schedule's major frame
+     * in nanoseconds. */
+    int64_t     major_frame;
+    /* num_sched_entries holds how many of the entries in the
+     * sched_entries[] array are valid. */
+    uint8_t     num_sched_entries;
+    /* The sched_entries array holds the actual schedule entries. */
+    struct {
+        /* dom_handle must match a domain's UUID */
+        xen_domain_handle_t dom_handle;
+        /* If a domain has multiple VCPUs, vcpu_id specifies which one
+         * this schedule entry applies to. It should be set to 0 if
+         * there is only one VCPU for the domain. */
+        int                 vcpu_id;
+        /* runtime specifies the amount of time that should be allocated
+         * to this VCPU per major frame. It is specified in nanoseconds */
+        int64_t             runtime;
+    } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+};
+typedef struct xen_sysctl_sched_arinc653_schedule
+    xen_sysctl_sched_arinc653_schedule_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_arinc653_schedule_t);
+
 struct xen_sysctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* (domctl.h) */
     uint32_t cmd;       /* XEN_SYSCTL_SCHEDOP_* */
     union {
+        struct xen_sysctl_sched_arinc653 {
+            XEN_GUEST_HANDLE(xen_sysctl_sched_arinc653_schedule_t) schedule;
+        } arinc653;
     } u;
 };
 typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
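
Purely for illustration -- not part of the patch -- a Dom0 tool might fill in
the new structure and hand it to the libxc call added above along these lines;
the helper name and the 20 ms / 10 ms figures are made up:

#include <string.h>
#include <xenctrl.h>

/* Illustrative only: give one domain's VCPU 0 a 10 ms slot
 * out of a 20 ms major frame. */
static int set_example_schedule(int xc_handle, xen_domain_handle_t dom_handle)
{
    xen_sysctl_sched_arinc653_schedule_t sched;

    memset(&sched, 0, sizeof(sched));
    sched.major_frame = 20000000LL;                 /* 20 ms, in nanoseconds */
    sched.num_sched_entries = 1;
    memcpy(sched.sched_entries[0].dom_handle, dom_handle,
           sizeof(xen_domain_handle_t));
    sched.sched_entries[0].vcpu_id = 0;
    sched.sched_entries[0].runtime = 10000000LL;    /* 10 ms */

    return xc_sched_arinc653_sched_set(xc_handle, &sched);
}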

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 15:04 [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools) Kathy Hadley
@ 2010-06-16 15:50 ` George Dunlap
  2010-06-16 16:00   ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: George Dunlap @ 2010-06-16 15:50 UTC (permalink / raw)
  To: Kathy Hadley; +Cc: xen-devel, Keir Fraser

On Wed, Jun 16, 2010 at 4:04 PM, Kathy Hadley
<Kathy.Hadley@dornerworks.com> wrote:
> +/**************************************************************************
> + * Global data                                                            *
> +
> **************************************************************************/
> +static arinc653_sched_private_t arinc653_schedule;
[snip]
> +    /* Allocate memory for ARINC 653 scheduler data structure */
> +    prv = &arinc653_schedule;

You didn't actually allocate memory, you just used the static
structure.  The point of cpupools is to allow multiple instances of a
scheduler to coexist -- if people create two pools, both using the
ARINC scheduler, there will be problems with this.  Is there any
reason not to actually call xmalloc() (as is done in
sched_credit.c:csched_init())?  (Perhaps this is a missed FIXME or a
merge-o?)
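
A minimal sketch of the xmalloc()-based pattern being suggested (hypothetical
code, loosely modelled on csched_init(); the structure and field names follow
the patch, the rest is not the submitted implementation):

static int arinc653_init(struct scheduler *ops)
{
    arinc653_sched_private_t *prv;

    /* One private structure per scheduler instance, so that two cpupools
     * both running the ARINC 653 scheduler do not share state. */
    prv = xmalloc(arinc653_sched_private_t);
    if ( prv == NULL )
        return -ENOMEM;

    memset(prv, 0, sizeof(*prv));
    INIT_LIST_HEAD(&prv->vcpu_list);
    ops->sched_data = prv;

    return 0;
}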

Some of the notices in headers seem a little excessive; if
sched_arinc653.c credits Dornerworks, surely people can infer who
added the control structure in xen/include/public/sysctl.h, and added
a link to it in scheduler.c?

Not NACK-worthy, but: In struct arin..._sched_private_s, the element
"arinc653_schedule" should probably be named something a bit more
descriptive.  Similarly, having arinc653 in ..._major_frame seems a
bit redundant, and inconsistent with naming for the other elements.

Looks fine to me otherwise.  (I haven't reviewed the algorithm itself.)

 -George

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 15:50 ` George Dunlap
@ 2010-06-16 16:00   ` Kathy Hadley
  2010-06-16 16:13     ` Keir Fraser
  2010-06-16 16:14     ` George Dunlap
  0 siblings, 2 replies; 35+ messages in thread
From: Kathy Hadley @ 2010-06-16 16:00 UTC (permalink / raw)
  To: George Dunlap; +Cc: xen-devel, Keir Fraser

George,
  I actually tried the xmalloc() method first.  I found that when the .adjust_global function was called, the address of the "ops" data structure passed to that function was different from the address of the "ops" data structure when the .init function was called.  I wanted to use .adjust_global to modify the data structure that was created when the .init function was called, but I could not figure out a way to get the address of the second data structure.  Suggestions?

  I can make the modifications you suggest for the other items.  Thanks for the comments.

  Regards,
Kathy Hadley
DornerWorks, Ltd.

> -----Original Message-----
> From: dunlapg@gmail.com [mailto:dunlapg@gmail.com] On Behalf Of George
> Dunlap
> Sent: Wednesday, June 16, 2010 11:50 AM
> To: Kathy Hadley
> Cc: xen-devel@lists.xensource.com; Keir Fraser
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On Wed, Jun 16, 2010 at 4:04 PM, Kathy Hadley
> <Kathy.Hadley@dornerworks.com> wrote:
> >
> +/*********************************************************************
> *****
> > + * Global
> data                                                            *
> > +
> >
> ***********************************************************************
> ***/
> > +static arinc653_sched_private_t arinc653_schedule;
> [snip]
> > +    /* Allocate memory for ARINC 653 scheduler data structure */
> > +    prv = &arinc653_schedule;
> 
> You didn't actually allocate memory, you just used the static
> structure.  The point of cpupools is to allow multiple instances of a
> scheduler to coexist -- if people create two pools, both using the
> ARINC scheduler, there will be problems with this.  Is there any
> reason not to actually call xmalloc() (as is done in
> sched_credit.c:csched_init())?  (Perhaps this is a missed FIXME or a
> merge-o?)
> 
> Some of the notices in headers seems a little excessive; if
> sched_arinc653.c credits Dornerworks, surely people can infer who
> added the control structure in xen/include/public/sysctl.h, and added
> a link to it in scheduler.c?
> 
> Not NACK-worthy, but: In struct arin..._sched_private_s, the element
> "arinc653_schedule" should probably be named something a bit more
> descriptive.  Similarly, having arinc653 in ..._major_frame seems a
> bit redundant, and inconsistent with naming for the other elements.
> 
> Looks fine to me otherwise.  (I haven't reviewed the algorithm itself.)
> 
>  -George

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:00   ` Kathy Hadley
@ 2010-06-16 16:13     ` Keir Fraser
  2010-06-16 16:14     ` George Dunlap
  1 sibling, 0 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-16 16:13 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

On 16/06/2010 17:00, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> George,
>   I actually tried the xmalloc() method first.  I found that when the
> .adjust_global function was called, the address of the "ops" data structure
> passed to that function was different from the address of the "ops" data
> structure when the .init function was called.  I wanted to use .adjust_global
> to modify the data structure that was created when the .init function was
> called, but I could not figure out a way to get the address of the second data
> structure.  Suggestions?

You should modify the structure you are passed -- that is ops and your
private structure as pointed at via ops->sched_data. The latter should
always point at a private structure you previously initialised via your
.init hook.
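
As a hypothetical outline only (the adjust_global signature shown and the
arinc653_sched_set() helper are assumptions, not code from the patch):

static int arinc653_adjust_global(const struct scheduler *ops,
                                  struct xen_sysctl_scheduler_op *sc)
{
    /* The same instance that .init allocated and stored in ops->sched_data;
     * update it in place rather than keeping a file-scope static copy. */
    arinc653_sched_private_t *prv = ops->sched_data;

    if ( sc->cmd != XEN_SYSCTL_SCHEDOP_putinfo )
        return -EINVAL;

    return arinc653_sched_set(prv, &sc->u.arinc653); /* hypothetical helper */
}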

 -- Keir

>   I can make the modifications you suggest for the other items.  Thanks for
> the comments.
> 
>   Regards,
> Kathy Hadley
> DornerWorks, Ltd.
> 
>> -----Original Message-----
>> From: dunlapg@gmail.com [mailto:dunlapg@gmail.com] On Behalf Of George
>> Dunlap
>> Sent: Wednesday, June 16, 2010 11:50 AM
>> To: Kathy Hadley
>> Cc: xen-devel@lists.xensource.com; Keir Fraser
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> On Wed, Jun 16, 2010 at 4:04 PM, Kathy Hadley
>> <Kathy.Hadley@dornerworks.com> wrote:
>>> 
>> +/*********************************************************************
>> *****
>>> + * Global
>> data                                                            *
>>> +
>>> 
>> ***********************************************************************
>> ***/
>>> +static arinc653_sched_private_t arinc653_schedule;
>> [snip]
>>> +    /* Allocate memory for ARINC 653 scheduler data structure */
>>> +    prv = &arinc653_schedule;
>> 
>> You didn't actually allocate memory, you just used the static
>> structure.  The point of cpupools is to allow multiple instances of a
>> scheduler to coexist -- if people create two pools, both using the
>> ARINC scheduler, there will be problems with this.  Is there any
>> reason not to actually call xmalloc() (as is done in
>> sched_credit.c:csched_init())?  (Perhaps this is a missed FIXME or a
>> merge-o?)
>> 
>> Some of the notices in headers seems a little excessive; if
>> sched_arinc653.c credits Dornerworks, surely people can infer who
>> added the control structure in xen/include/public/sysctl.h, and added
>> a link to it in scheduler.c?
>> 
>> Not NACK-worthy, but: In struct arin..._sched_private_s, the element
>> "arinc653_schedule" should probably be named something a bit more
>> descriptive.  Similarly, having arinc653 in ..._major_frame seems a
>> bit redundant, and inconsistent with naming for the other elements.
>> 
>> Looks fine to me otherwise.  (I haven't reviewed the algorithm itself.)
>> 
>>  -George

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:00   ` Kathy Hadley
  2010-06-16 16:13     ` Keir Fraser
@ 2010-06-16 16:14     ` George Dunlap
  2010-06-16 16:20       ` Keir Fraser
  1 sibling, 1 reply; 35+ messages in thread
From: George Dunlap @ 2010-06-16 16:14 UTC (permalink / raw)
  To: Kathy Hadley; +Cc: xen-devel, Juergen Gross, Keir Fraser

On Wed, Jun 16, 2010 at 5:00 PM, Kathy Hadley
<Kathy.Hadley@dornerworks.com> wrote:
> George,
>  I actually tried the xmalloc() method first.  I found that when the .adjust_global function was called, the address of the "ops" data structure passed to that function was different from the address of the "ops" data structure when the .init function was called.  I wanted to use .adjust_global to modify the data structure that was created when the .init function was called, but I could not figure out a way to get the address of the second data structure.  Suggestions?

It's been a month or two since I trawled through the cpupools code;
but I seem to recall that .init is called twice -- once for the
"default pool" (cpupool0), and once for an actually in-use pool.
(Juergen, can you correct me if I'm wrong?)  Is it possible that
that's the difference in the pointers that you're seeing?

 -George

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:14     ` George Dunlap
@ 2010-06-16 16:20       ` Keir Fraser
  2010-06-16 16:25         ` Kathy Hadley
                           ` (2 more replies)
  0 siblings, 3 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-16 16:20 UTC (permalink / raw)
  To: George Dunlap, Kathy Hadley; +Cc: xen-devel, Gross, Juergen

On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com> wrote:

>>  I actually tried the xmalloc() method first.  I found that when the
>> .adjust_global function was called, the address of the "ops" data structure
>> passed to that function was different from the address of the "ops" data
>> structure when the .init function was called.  I wanted to use .adjust_global
>> to modify the data structure that was created when the .init function was
>> called, but I could not figure out a way to get the address of the second
>> data structure.  Suggestions?
> 
> It's been a month or two since I trawled through the cpupools code;
> but I seem to recall that .init is called twice -- once for the
> "default pool" (cpupool0), and once for an actually in-use pool.
> (Juergen, can you correct me if I'm wrong?)  Is it possible that
> that's the difference in the pointers that you're seeing?

Oh yes, that was the old behaviour. I took a hatchet to the
scheduler/cpupool interfaces a few weeks ago and now we should only
initialise the scheduler once, unless extra cpupools are manually created.
The fact that Kathy is seeing two different ops structures probably
indicates that her xen-unstable tree is very out of date. Which may also
mean that the patch will not apply to current tip.

 -- Keir

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:20       ` Keir Fraser
@ 2010-06-16 16:25         ` Kathy Hadley
  2010-06-16 16:31           ` Keir Fraser
  2010-06-16 16:25         ` George Dunlap
  2010-06-17  5:02         ` Juergen Gross
  2 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-16 16:25 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel, Juergen Gross

Keir,
  I only saw the .init function called once.  I downloaded xen-unstable on May 27.  Were your updates after that?

  Thanks,
Kathy Hadley


> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 16, 2010 12:20 PM
> To: George Dunlap; Kathy Hadley
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
> wrote:
> 
> >>  I actually tried the xmalloc() method first.  I found that when the
> >> .adjust_global function was called, the address of the "ops" data
> structure
> >> passed to that function was different from the address of the "ops"
> data
> >> structure when the .init function was called.  I wanted to use
> .adjust_global
> >> to modify the data structure that was created when the .init
> function was
> >> called, but I could not figure out a way to get the address of the
> second
> >> data structure.  Suggestions?
> >
> > It's been a month or two since I trawled through the cpupools code;
> > but I seem to recall that .init is called twice -- once for the
> > "default pool" (cpupool0), and once for an actually in-use pool.
> > (Juergen, can you correct me if I'm wrong?)  Is it possible that
> > that's the difference in the pointers that you're seeing?
> 
> Oh yes, that was the old behaviour. I took a hatchet to the
> scheduler/cpupool interfaces a few weeks ago and now we should only
> initialise the scheduler once, unless extra cpupools are manually
> created.
> The fact that Kathy is seeing two different ops structures probably
> indicates that her xen-unstable tree is very out of date. Which may
> also
> mean that the patch will not apply to current tip.
> 
>  -- Keir
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:20       ` Keir Fraser
  2010-06-16 16:25         ` Kathy Hadley
@ 2010-06-16 16:25         ` George Dunlap
  2010-06-17  5:02         ` Juergen Gross
  2 siblings, 0 replies; 35+ messages in thread
From: George Dunlap @ 2010-06-16 16:25 UTC (permalink / raw)
  To: Keir Fraser; +Cc: Kathy Hadley, Gross, xen-devel

It applied pretty cleanly for me.  I didn't try to build it though. :-)

 -George

On Wed, Jun 16, 2010 at 5:20 PM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com> wrote:
>
>>>  I actually tried the xmalloc() method first.  I found that when the
>>> .adjust_global function was called, the address of the "ops" data structure
>>> passed to that function was different from the address of the "ops" data
>>> structure when the .init function was called.  I wanted to use .adjust_global
>>> to modify the data structure that was created when the .init function was
>>> called, but I could not figure out a way to get the address of the second
>>> data structure.  Suggestions?
>>
>> It's been a month or two since I trawled through the cpupools code;
>> but I seem to recall that .init is called twice -- once for the
>> "default pool" (cpupool0), and once for an actually in-use pool.
>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
>> that's the difference in the pointers that you're seeing?
>
> Oh yes, that was the old behaviour. I took a hatchet to the
> scheduler/cpupool interfaces a few weeks ago and now we should only
> initialise the scheduler once, unless extra cpupools are manually created.
> The fact that Kathy is seeing two different ops structures probably
> indicates that her xen-unstable tree is very out of date. Which may also
> mean that the patch will not apply to current tip.
>
>  -- Keir
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:25         ` Kathy Hadley
@ 2010-06-16 16:31           ` Keir Fraser
  2010-06-16 16:40             ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-16 16:31 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel, Gross, Juergen

On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Keir,
>   I only saw the .init function called once.  I downloaded xen-unstable on May
> 27.  Were your updates after that?

My changes were done before May 27, and that ties in with you seeing .init
called only once. That being the case, you should not see multiple different
ops structures ('struct scheduler' instances). The only ops struct that
should exist in the system in this case should be the one statically defined
near the top of common/schedule.c.

 -- Keir

>   Thanks,
> Kathy Hadley
> 
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 16, 2010 12:20 PM
>> To: George Dunlap; Kathy Hadley
>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
>> wrote:
>> 
>>>>  I actually tried the xmalloc() method first.  I found that when the
>>>> .adjust_global function was called, the address of the "ops" data
>> structure
>>>> passed to that function was different from the address of the "ops"
>> data
>>>> structure when the .init function was called.  I wanted to use
>> .adjust_global
>>>> to modify the data structure that was created when the .init
>> function was
>>>> called, but I could not figure out a way to get the address of the
>> second
>>>> data structure.  Suggestions?
>>> 
>>> It's been a month or two since I trawled through the cpupools code;
>>> but I seem to recall that .init is called twice -- once for the
>>> "default pool" (cpupool0), and once for an actually in-use pool.
>>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
>>> that's the difference in the pointers that you're seeing?
>> 
>> Oh yes, that was the old behaviour. I took a hatchet to the
>> scheduler/cpupool interfaces a few weeks ago and now we should only
>> initialise the scheduler once, unless extra cpupools are manually
>> created.
>> The fact that Kathy is seeing two different ops structures probably
>> indicates that her xen-unstable tree is very out of date. Which may
>> also
>> mean that the patch will not apply to current tip.
>> 
>>  -- Keir
>> 
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:31           ` Keir Fraser
@ 2010-06-16 16:40             ` Kathy Hadley
  2010-06-16 16:49               ` Keir Fraser
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-16 16:40 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel, Juergen Gross

Keir, George, et al.,
  I definitely saw two "ops" values.  When the .init function was called, ops = 0xFF213DC0; I then used xmalloc() to allocate memory for the scheduler data structure and set ops->sched_data equal to the address of that memory block (similar to what is done in csched_init in sched_credit.c).  When the .adjust_global function was called, ops = 0xFF2112D0 and ops->sched_data was not equal to the address of the memory block allocated in the .init function (it was equal to the value set when "sched_arinc653_def" was declared).

  Regards,
Kathy

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 16, 2010 12:32 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Keir,
> >   I only saw the .init function called once.  I downloaded xen-
> unstable on May
> > 27.  Were your updates after that?
> 
> My changes were done before May 27, and that ties in with you seeing
> .init
> called only once. That being the case, you should not see multiple
> different
> ops structures ('struct scheduler' instances). The only ops struct that
> should exist in the system in this case should be the one statically
> defined
> near the top of common/schedule.c.
> 
>  -- Keir
> 
> >   Thanks,
> > Kathy Hadley
> >
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 16, 2010 12:20 PM
> >> To: George Dunlap; Kathy Hadley
> >> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
> >> wrote:
> >>
> >>>>  I actually tried the xmalloc() method first.  I found that when
> the
> >>>> .adjust_global function was called, the address of the "ops" data
> >> structure
> >>>> passed to that function was different from the address of the
> "ops"
> >> data
> >>>> structure when the .init function was called.  I wanted to use
> >> .adjust_global
> >>>> to modify the data structure that was created when the .init
> >> function was
> >>>> called, but I could not figure out a way to get the address of the
> >> second
> >>>> data structure.  Suggestions?
> >>>
> >>> It's been a month or two since I trawled through the cpupools code;
> >>> but I seem to recall that .init is called twice -- once for the
> >>> "default pool" (cpupool0), and once for an actually in-use pool.
> >>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
> >>> that's the difference in the pointers that you're seeing?
> >>
> >> Oh yes, that was the old behaviour. I took a hatchet to the
> >> scheduler/cpupool interfaces a few weeks ago and now we should only
> >> initialise the scheduler once, unless extra cpupools are manually
> >> created.
> >> The fact that Kathy is seeing two different ops structures probably
> >> indicates that her xen-unstable tree is very out of date. Which may
> >> also
> >> mean that the patch will not apply to current tip.
> >>
> >>  -- Keir
> >>
> >
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:40             ` Kathy Hadley
@ 2010-06-16 16:49               ` Keir Fraser
  2010-06-16 18:03                 ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-16 16:49 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel, Gross, Juergen

Oh, I see. Well, the cause is that the
common/schedule.c:sched_adjust_global() is broken. But, what should it
actually do, given that multiple schedulers of same or differing types may
exist in a system now? Perhaps the sysctl should take a cpupool id, to
uniquely identify the scheduler instance to be adjusted?
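
Sketched out, that suggestion amounts to something like the following in
sysctl.h (illustrative layout only; the real change is the one referred to
below as c/s 21626):

struct xen_sysctl_scheduler_op {
    uint32_t cpupool_id; /* selects the scheduler instance to adjust */
    uint32_t sched_id;   /* XEN_SCHEDULER_* (domctl.h) */
    uint32_t cmd;        /* XEN_SYSCTL_SCHEDOP_* */
    union {
        struct xen_sysctl_sched_arinc653 {
            XEN_GUEST_HANDLE(xen_sysctl_sched_arinc653_schedule_t) schedule;
        } arinc653;
    } u;
};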

 -- Keir

On 16/06/2010 17:40, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Keir, George, et. al.,
>   I definitely saw two "ops" values.  When the .init function was called, ops
> = 0xFF213DC0; I then used xmalloc() to allocate memory for the scheduler data
> structure and set ops->sched_data equal to the address of that memory block
> (similar to what is done in csched_init in sched_credit.c).  When the
> .adjust_global function was called, ops = 0xFF2112D0 and ops->sched_data was
> not equal to the address of the memory block allocated in the .init function
> (it was equal to the value set when "sched_arinc653_def" was declared).
> 
>   Regards,
> Kathy
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 16, 2010 12:32 PM
>> To: Kathy Hadley; George Dunlap
>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>> wrote:
>> 
>>> Keir,
>>>   I only saw the .init function called once.  I downloaded xen-
>> unstable on May
>>> 27.  Were your updates after that?
>> 
>> My changes were done before May 27, and that ties in with you seeing
>> .init
>> called only once. That being the case, you should not see multiple
>> different
>> ops structures ('struct scheduler' instances). The only ops struct that
>> should exist in the system in this case should be the one statically
>> defined
>> near the top of common/schedule.c.
>> 
>>  -- Keir
>> 
>>>   Thanks,
>>> Kathy Hadley
>>> 
>>> 
>>>> -----Original Message-----
>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>> Sent: Wednesday, June 16, 2010 12:20 PM
>>>> To: George Dunlap; Kathy Hadley
>>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
>> (updated
>>>> to add support for CPU pools)
>>>> 
>>>> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
>>>> wrote:
>>>> 
>>>>>>  I actually tried the xmalloc() method first.  I found that when
>> the
>>>>>> .adjust_global function was called, the address of the "ops" data
>>>> structure
>>>>>> passed to that function was different from the address of the
>> "ops"
>>>> data
>>>>>> structure when the .init function was called.  I wanted to use
>>>> .adjust_global
>>>>>> to modify the data structure that was created when the .init
>>>> function was
>>>>>> called, but I could not figure out a way to get the address of the
>>>> second
>>>>>> data structure.  Suggestions?
>>>>> 
>>>>> It's been a month or two since I trawled through the cpupools code;
>>>>> but I seem to recall that .init is called twice -- once for the
>>>>> "default pool" (cpupool0), and once for an actually in-use pool.
>>>>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
>>>>> that's the difference in the pointers that you're seeing?
>>>> 
>>>> Oh yes, that was the old behaviour. I took a hatchet to the
>>>> scheduler/cpupool interfaces a few weeks ago and now we should only
>>>> initialise the scheduler once, unless extra cpupools are manually
>>>> created.
>>>> The fact that Kathy is seeing two different ops structures probably
>>>> indicates that her xen-unstable tree is very out of date. Which may
>>>> also
>>>> mean that the patch will not apply to current tip.
>>>> 
>>>>  -- Keir
>>>> 
>>> 
>> 
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:49               ` Keir Fraser
@ 2010-06-16 18:03                 ` Kathy Hadley
  2010-06-17  7:04                   ` Keir Fraser
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-16 18:03 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel, Juergen Gross

That sounds reasonable to me.

Kathy

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 16, 2010 12:50 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> Oh, I see. Well, the cause is that the
> common/schedule.c:sched_adjust_global() is broken. But, what should it
> actually do, given that multiple schedulers of same or differing types
> may
> exist in a system now? Perhaps the sysctl should take a cpupool id, to
> uniquely identify the scheduler instance to be adjusted?
> 
>  -- Keir
> 
> On 16/06/2010 17:40, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Keir, George, et. al.,
> >   I definitely saw two "ops" values.  When the .init function was
> called, ops
> > = 0xFF213DC0; I then used xmalloc() to allocate memory for the
> scheduler data
> > structure and set ops->sched_data equal to the address of that memory
> block
> > (similar to what is done in csched_init in sched_credit.c).  When the
> > .adjust_global function was called, ops = 0xFF2112D0 and ops-
> >sched_data was
> > not equal to the address of the memory block allocated in the .init
> function
> > (it was equal to the value set when "sched_arinc653_def" was
> declared).
> >
> >   Regards,
> > Kathy
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 16, 2010 12:32 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Keir,
> >>>   I only saw the .init function called once.  I downloaded xen-
> >> unstable on May
> >>> 27.  Were your updates after that?
> >>
> >> My changes were done before May 27, and that ties in with you seeing
> >> .init
> >> called only once. That being the case, you should not see multiple
> >> different
> >> ops structures ('struct scheduler' instances). The only ops struct
> that
> >> should exist in the system in this case should be the one statically
> >> defined
> >> near the top of common/schedule.c.
> >>
> >>  -- Keir
> >>
> >>>   Thanks,
> >>> Kathy Hadley
> >>>
> >>>
> >>>> -----Original Message-----
> >>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>> Sent: Wednesday, June 16, 2010 12:20 PM
> >>>> To: George Dunlap; Kathy Hadley
> >>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >> (updated
> >>>> to add support for CPU pools)
> >>>>
> >>>> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
> >>>> wrote:
> >>>>
> >>>>>>  I actually tried the xmalloc() method first.  I found that when
> >> the
> >>>>>> .adjust_global function was called, the address of the "ops"
> data
> >>>> structure
> >>>>>> passed to that function was different from the address of the
> >> "ops"
> >>>> data
> >>>>>> structure when the .init function was called.  I wanted to use
> >>>> .adjust_global
> >>>>>> to modify the data structure that was created when the .init
> >>>> function was
> >>>>>> called, but I could not figure out a way to get the address of
> the
> >>>> second
> >>>>>> data structure.  Suggestions?
> >>>>>
> >>>>> It's been a month or two since I trawled through the cpupools
> code;
> >>>>> but I seem to recall that .init is called twice -- once for the
> >>>>> "default pool" (cpupool0), and once for an actually in-use pool.
> >>>>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
> >>>>> that's the difference in the pointers that you're seeing?
> >>>>
> >>>> Oh yes, that was the old behaviour. I took a hatchet to the
> >>>> scheduler/cpupool interfaces a few weeks ago and now we should
> only
> >>>> initialise the scheduler once, unless extra cpupools are manually
> >>>> created.
> >>>> The fact that Kathy is seeing two different ops structures
> probably
> >>>> indicates that her xen-unstable tree is very out of date. Which
> may
> >>>> also
> >>>> mean that the patch will not apply to current tip.
> >>>>
> >>>>  -- Keir
> >>>>
> >>>
> >>
> >
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 16:20       ` Keir Fraser
  2010-06-16 16:25         ` Kathy Hadley
  2010-06-16 16:25         ` George Dunlap
@ 2010-06-17  5:02         ` Juergen Gross
  2010-06-17  6:09           ` Keir Fraser
  2 siblings, 1 reply; 35+ messages in thread
From: Juergen Gross @ 2010-06-17  5:02 UTC (permalink / raw)
  To: Keir Fraser; +Cc: George Dunlap, Kathy Hadley, xen-devel

On 06/16/2010 06:20 PM, Keir Fraser wrote:
> On 16/06/2010 17:14, "George Dunlap"<George.Dunlap@eu.citrix.com>  wrote:
>
>>>   I actually tried the xmalloc() method first.  I found that when the
>>> .adjust_global function was called, the address of the "ops" data structure
>>> passed to that function was different from the address of the "ops" data
>>> structure when the .init function was called.  I wanted to use .adjust_global
>>> to modify the data structure that was created when the .init function was
>>> called, but I could not figure out a way to get the address of the second
>>> data structure.  Suggestions?
>>
>> It's been a month or two since I trawled through the cpupools code;
>> but I seem to recall that .init is called twice -- once for the
>> "default pool" (cpupool0), and once for an actually in-use pool.
>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
>> that's the difference in the pointers that you're seeing?
>
> Oh yes, that was the old behaviour. I took a hatchet to the
> scheduler/cpupool interfaces a few weeks ago and now we should only
> initialise the scheduler once, unless extra cpupools are manually created.

Keir, what do you think about creating an "idle-scheduler" for the cpus not in
any cpupool? It would only schedule the idle vcpu and could be VERY minimal.
This could reduce the complexity of moving cpus from and to cpupools.

I could try to setup a patch if you support this idea (I'm asking for your
opinion before starting this, as I'm rather busy with other tasks).


Juergen

P.S.: George, you still seem to use my old mail address which isn't valid any
       more...

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-17  5:02         ` Juergen Gross
@ 2010-06-17  6:09           ` Keir Fraser
  0 siblings, 0 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-17  6:09 UTC (permalink / raw)
  To: Juergen Gross; +Cc: George Dunlap, Kathy Hadley, xen-devel

On 17/06/2010 06:02, "Juergen Gross" <juergen.gross@ts.fujitsu.com> wrote:

>> Oh yes, that was the old behaviour. I took a hatchet to the
>> scheduler/cpupool interfaces a few weeks ago and now we should only
>> initialise the scheduler once, unless extra cpupools are manually created.
> 
> Keir, what do you think about creating an "idle-scheduler" for the cpus not in
> any cpupool? It would only schedule the idle vcpu and could be VERY minimal.
> This could reduce the complexity of moving cpus from and to cpupools.
> 
> I could try to setup a patch if you support this idea (I'm asking for your
> opinion before starting this, as I'm rather busy with other tasks).

What we have now is fine (which is, basically, cpupool0 and the 'no-cpupool'
schedulers are one and the same thing). I don't want yet another scheduler
thanks. ;-)

 -- Keir

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-16 18:03                 ` Kathy Hadley
@ 2010-06-17  7:04                   ` Keir Fraser
  2010-06-17 18:16                     ` Kathy Hadley
                                       ` (2 more replies)
  0 siblings, 3 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-17  7:04 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel, Gross, Juergen

On 16/06/2010 19:03, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> That sounds reasonable to me.

Fixed as of changeset 21626, in the staging tree
(http://xenbits.xensource.com/staging/xen-unstable.hg).

 K.

> Kathy
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 16, 2010 12:50 PM
>> To: Kathy Hadley; George Dunlap
>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> Oh, I see. Well, the cause is that the
>> common/schedule.c:sched_adjust_global() is broken. But, what should it
>> actually do, given that multiple schedulers of same or differing types
>> may
>> exist in a system now? Perhaps the sysctl should take a cpupool id, to
>> uniquely identify the scheduler instance to be adjusted?
>> 
>>  -- Keir
>> 
>> On 16/06/2010 17:40, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>> wrote:
>> 
>>> Keir, George, et. al.,
>>>   I definitely saw two "ops" values.  When the .init function was
>> called, ops
>>> = 0xFF213DC0; I then used xmalloc() to allocate memory for the
>> scheduler data
>>> structure and set ops->sched_data equal to the address of that memory
>> block
>>> (similar to what is done in csched_init in sched_credit.c).  When the
>>> .adjust_global function was called, ops = 0xFF2112D0 and ops-
>>> sched_data was
>>> not equal to the address of the memory block allocated in the .init
>> function
>>> (it was equal to the value set when "sched_arinc653_def" was
>> declared).
>>> 
>>>   Regards,
>>> Kathy
>>> 
>>>> -----Original Message-----
>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>> Sent: Wednesday, June 16, 2010 12:32 PM
>>>> To: Kathy Hadley; George Dunlap
>>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
>> (updated
>>>> to add support for CPU pools)
>>>> 
>>>> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>>> wrote:
>>>> 
>>>>> Keir,
>>>>>   I only saw the .init function called once.  I downloaded xen-
>>>> unstable on May
>>>>> 27.  Were your updates after that?
>>>> 
>>>> My changes were done before May 27, and that ties in with you seeing
>>>> .init
>>>> called only once. That being the case, you should not see multiple
>>>> different
>>>> ops structures ('struct scheduler' instances). The only ops struct
>> that
>>>> should exist in the system in this case should be the one statically
>>>> defined
>>>> near the top of common/schedule.c.
>>>> 
>>>>  -- Keir
>>>> 
>>>>>   Thanks,
>>>>> Kathy Hadley
>>>>> 
>>>>> 
>>>>>> -----Original Message-----
>>>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>>>> Sent: Wednesday, June 16, 2010 12:20 PM
>>>>>> To: George Dunlap; Kathy Hadley
>>>>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
>>>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
>>>> (updated
>>>>>> to add support for CPU pools)
>>>>>> 
>>>>>> On 16/06/2010 17:14, "George Dunlap" <George.Dunlap@eu.citrix.com>
>>>>>> wrote:
>>>>>> 
>>>>>>>>  I actually tried the xmalloc() method first.  I found that when
>>>> the
>>>>>>>> .adjust_global function was called, the address of the "ops"
>> data
>>>>>> structure
>>>>>>>> passed to that function was different from the address of the
>>>> "ops"
>>>>>> data
>>>>>>>> structure when the .init function was called.  I wanted to use
>>>>>> .adjust_global
>>>>>>>> to modify the data structure that was created when the .init
>>>>>> function was
>>>>>>>> called, but I could not figure out a way to get the address of
>> the
>>>>>> second
>>>>>>>> data structure.  Suggestions?
>>>>>>> 
>>>>>>> It's been a month or two since I trawled through the cpupools
>> code;
>>>>>>> but I seem to recall that .init is called twice -- once for the
>>>>>>> "default pool" (cpupool0), and once for an actually in-use pool.
>>>>>>> (Juergen, can you correct me if I'm wrong?)  Is it possible that
>>>>>>> that's the difference in the pointers that you're seeing?
>>>>>> 
>>>>>> Oh yes, that was the old behaviour. I took a hatchet to the
>>>>>> scheduler/cpupool interfaces a few weeks ago and now we should
>> only
>>>>>> initialise the scheduler once, unless extra cpupools are manually
>>>>>> created.
>>>>>> The fact that Kathy is seeing two different ops structures
>> probably
>>>>>> indicates that her xen-unstable tree is very out of date. Which
>> may
>>>>>> also
>>>>>> mean that the patch will not apply to current tip.
>>>>>> 
>>>>>>  -- Keir
>>>>>> 
>>>>> 
>>>> 
>>> 
>> 
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-17  7:04                   ` Keir Fraser
@ 2010-06-17 18:16                     ` Kathy Hadley
  2010-06-17 18:26                       ` Keir Fraser
  2010-06-18 17:35                     ` Kathy Hadley
  2010-06-22 19:10                     ` Kathy Hadley
  2 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-17 18:16 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

I downloaded the xen-unstable staging tree, through changeset 21632.
After building ("make xen", "make install-xen", "make tools", "make
install-tools", "make stubdom" and "make install-stubdom") and
restarting, xend is not running (attempting to execute "xm list" returns
"Error: Unable to connect to xend: No such file or directory. Is xend
running?").

/var/log/xend.log contains the following:

[2010-06-17 13:58:50 2303] ERROR (SrvDaemon:349) Exception starting xend
((2, 'No such file or directory'))
Traceback (most recent call last):
  File "/usr/lib/python2.6/dist-packages/xen/xend/server/SrvDaemon.py",
line 341, in run
    servers = SrvServer.create()
  File "/usr/lib/python2.6/dist-packages/xen/xend/server/SrvServer.py",
line 258, in create
    root.putChild('xend', SrvRoot())
  File "/usr/lib/python2.6/dist-packages/xen/xend/server/SrvRoot.py",
line 40, in __init__
    self.get(name)
  File "/usr/lib/python2.6/dist-packages/xen/web/SrvDir.py", line 84, in
get
    val = val.getobj()
  File "/usr/lib/python2.6/dist-packages/xen/web/SrvDir.py", line 52, in
getobj
    self.obj = klassobj()
  File "/usr/lib/python2.6/dist-packages/xen/xend/server/SrvNode.py",
line 30, in __init__
    self.xn = XendNode.instance()
  File "/usr/lib/python2.6/dist-packages/xen/xend/XendNode.py", line
1176, in instance
    inst = XendNode()
  File "/usr/lib/python2.6/dist-packages/xen/xend/XendNode.py", line
163, in __init__
    self._init_cpu_pools()
  File "/usr/lib/python2.6/dist-packages/xen/xend/XendNode.py", line
377, in _init_cpu_pools
    XendCPUPool.recreate_active_pools()
  File "/usr/lib/python2.6/dist-packages/xen/xend/XendCPUPool.py", line
754, in recreate_active_pools
    uuid = xstransact.Read(path, 'uuid')
  File
"/usr/lib/python2.6/dist-packages/xen/xend/xenstore/xstransact.py", line
307, in Read
    return complete(path, lambda t: t.read(*args))
  File
"/usr/lib/python2.6/dist-packages/xen/xend/xenstore/xstransact.py", line
361, in complete
    t = xstransact(path)
  File
"/usr/lib/python2.6/dist-packages/xen/xend/xenstore/xstransact.py", line
29, in __init__
    self.transaction = xshandle().transaction_start()
  File "/usr/lib/python2.6/dist-packages/xen/xend/xenstore/xsutil.py",
line 18, in xshandle
    xs_handle = xen.lowlevel.xs.xs()
Error: (2, 'No such file or directory')
[2010-06-17 13:58:50 2302] INFO (SrvDaemon:220) Xend exited with status
1.

Any suggestions?

Thank you,
  Kathy Hadley
  DornerWorks, Ltd.

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Thursday, June 17, 2010 3:04 AM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 16/06/2010 19:03, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > That sounds reasonable to me.
> 
> Fixed as of changeset 21626, in the staging tree
> (http://xenbits.xensource.com/staging/xen-unstable.hg).
> 
>  K.
> 
> > Kathy 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-17 18:16                     ` Kathy Hadley
@ 2010-06-17 18:26                       ` Keir Fraser
  0 siblings, 0 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-17 18:26 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

On 17/06/2010 19:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> line 18, in xshandle
>     xs_handle = xen.lowlevel.xs.xs()
> Error: (2, 'No such file or directory')
> [2010-06-17 13:58:50 2302] INFO (SrvDaemon:220) Xend exited with status
> 1.
> 
> Any suggestions?

Xend no longer automatically starts xenstored for you (that was moved to
example init scripts which you may not have installed). So you need to
'xenstored; xenconsoled; xend start'.

 -- Keir

^ permalink raw reply	[flat|nested] 35+ messages in thread

* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-17  7:04                   ` Keir Fraser
  2010-06-17 18:16                     ` Kathy Hadley
@ 2010-06-18 17:35                     ` Kathy Hadley
  2010-06-18 17:49                       ` Keir Fraser
  2010-06-19 11:14                       ` George Dunlap
  2010-06-22 19:10                     ` Kathy Hadley
  2 siblings, 2 replies; 35+ messages in thread
From: Kathy Hadley @ 2010-06-18 17:35 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

I migrated to c/s 21632 in the staging tree.  I see that xen_sysctl_scheduler_op in sysctl.h was updated in c/s 21626 to add cpupool_id to the structure.  I call the following function from an application executing in Dom0 to adjust the ARINC 653 schedule:

int xc_sched_arinc653_sched_set(
    xc_interface *xch,
    xen_sysctl_sched_arinc653_schedule_t * sched)
{
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_scheduler_op;
    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
    set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);

    return do_sysctl(xch, &sysctl);
}

In this function, which executes in Dom0, how do I determine the cpupool_id that I need to set in the sysctl data structure?

Thank you,
  Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Thursday, June 17, 2010 3:04 AM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 16/06/2010 19:03, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > That sounds reasonable to me.
> 
> Fixed as of changeset 21626, in the staging tree
> (http://xenbits.xensource.com/staging/xen-unstable.hg).
> 
>  K.
> 
> > Kathy
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 16, 2010 12:50 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> Oh, I see. Well, the cause is that the
> >> common/schedule.c:sched_adjust_global() is broken. But, what should
> it
> >> actually do, given that multiple schedulers of same or differing
> types
> >> may
> >> exist in a system now? Perhaps the sysctl should take a cpupool id,
> to
> >> uniquely identify the scheduler instance to be adjusted?
> >>
> >>  -- Keir
> >>
> >> On 16/06/2010 17:40, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Keir, George, et. al.,
> >>>   I definitely saw two "ops" values.  When the .init function was
> >> called, ops
> >>> = 0xFF213DC0; I then used xmalloc() to allocate memory for the
> >> scheduler data
> >>> structure and set ops->sched_data equal to the address of that
> memory
> >> block
> >>> (similar to what is done in csched_init in sched_credit.c).  When
> the
> >>> .adjust_global function was called, ops = 0xFF2112D0 and ops-
> >>> sched_data was
> >>> not equal to the address of the memory block allocated in the .init
> >> function
> >>> (it was equal to the value set when "sched_arinc653_def" was
> >> declared).
> >>>
> >>>   Regards,
> >>> Kathy
> >>>
> >>>> -----Original Message-----
> >>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>> Sent: Wednesday, June 16, 2010 12:32 PM
> >>>> To: Kathy Hadley; George Dunlap
> >>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >> (updated
> >>>> to add support for CPU pools)
> >>>>
> >>>> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >>>> wrote:
> >>>>
> >>>>> Keir,
> >>>>>   I only saw the .init function called once.  I downloaded xen-
> >>>> unstable on May
> >>>>> 27.  Were your updates after that?
> >>>>
> >>>> My changes were done before May 27, and that ties in with you
> seeing
> >>>> .init
> >>>> called only once. That being the case, you should not see multiple
> >>>> different
> >>>> ops structures ('struct scheduler' instances). The only ops struct
> >> that
> >>>> should exist in the system in this case should be the one
> statically
> >>>> defined
> >>>> near the top of common/schedule.c.
> >>>>
> >>>>  -- Keir
> >>>>
> >>>>>   Thanks,
> >>>>> Kathy Hadley
> >>>>>
> >>>>>
> >>>>>> -----Original Message-----
> >>>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>>>> Sent: Wednesday, June 16, 2010 12:20 PM
> >>>>>> To: George Dunlap; Kathy Hadley
> >>>>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >>>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >>>> (updated
> >>>>>> to add support for CPU pools)
> >>>>>>
> >>>>>> On 16/06/2010 17:14, "George Dunlap"
> <George.Dunlap@eu.citrix.com>
> >>>>>> wrote:
> >>>>>>
> >>>>>>>>  I actually tried the xmalloc() method first.  I found that
> when
> >>>> the
> >>>>>>>> .adjust_global function was called, the address of the "ops"
> >> data
> >>>>>> structure
> >>>>>>>> passed to that function was different from the address of the
> >>>> "ops"
> >>>>>> data
> >>>>>>>> structure when the .init function was called.  I wanted to use
> >>>>>> .adjust_global
> >>>>>>>> to modify the data structure that was created when the .init
> >>>>>> function was
> >>>>>>>> called, but I could not figure out a way to get the address of
> >> the
> >>>>>> second
> >>>>>>>> data structure.  Suggestions?
> >>>>>>>
> >>>>>>> It's been a month or two since I trawled through the cpupools
> >> code;
> >>>>>>> but I seem to recall that .init is called twice -- once for the
> >>>>>>> "default pool" (cpupool0), and once for an actually in-use
> pool.
> >>>>>>> (Juergen, can you correct me if I'm wrong?)  Is it possible
> that
> >>>>>>> that's the difference in the pointers that you're seeing?
> >>>>>>
> >>>>>> Oh yes, that was the old behaviour. I took a hatchet to the
> >>>>>> scheduler/cpupool interfaces a few weeks ago and now we should
> >> only
> >>>>>> initialise the scheduler once, unless extra cpupools are
> manually
> >>>>>> created.
> >>>>>> The fact that Kathy is seeing two different ops structures
> >> probably
> >>>>>> indicates that her xen-unstable tree is very out of date. Which
> >> may
> >>>>>> also
> >>>>>> mean that the patch will not apply to current tip.
> >>>>>>
> >>>>>>  -- Keir
> >>>>>>
> >>>>>
> >>>>
> >>>
> >>
> >
> 


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-18 17:35                     ` Kathy Hadley
@ 2010-06-18 17:49                       ` Keir Fraser
  2010-06-19 11:14                       ` George Dunlap
  1 sibling, 0 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-18 17:49 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

On 18/06/2010 18:35, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> xc_sched_arinc653_sched_set(
>     xc_interface *xch,
>     xen_sysctl_sched_arinc653_schedule_t * sched)
> {
>     DECLARE_SYSCTL;
> 
>     sysctl.cmd = XEN_SYSCTL_scheduler_op;
>     sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
>     sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
>     set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);
> 
>     return do_sysctl(xch, &sysctl);
> }
> 
> In this function, which executes in Dom0, how do I determine the cpupool_id
> that I need to set in the sysctl data structure?

It would be passed in to you?

Or if you don't care about cpu pools you can just set it to zero. That's the
default cpupool created at boot time which all CPUs are bound to by default.

 -- Keir


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-18 17:35                     ` Kathy Hadley
  2010-06-18 17:49                       ` Keir Fraser
@ 2010-06-19 11:14                       ` George Dunlap
  1 sibling, 0 replies; 35+ messages in thread
From: George Dunlap @ 2010-06-19 11:14 UTC (permalink / raw)
  To: Kathy Hadley; +Cc: xen-devel, Keir Fraser

On Fri, Jun 18, 2010 at 6:35 PM, Kathy Hadley
<Kathy.Hadley@dornerworks.com> wrote:
> I migrated to c/s 21632 in the staging tree.  I see that xen_sysctl_scheduler_op in sysctl.h was updated in c/s 21626 to add cpupool_id to the structure.  I call the following function from an application executing in Dom0 to adjust the ARINC 653 schedule:
>
> xc_sched_arinc653_sched_set(
>    xc_interface *xch,
>    xen_sysctl_sched_arinc653_schedule_t * sched)
> {
>    DECLARE_SYSCTL;
>
>    sysctl.cmd = XEN_SYSCTL_scheduler_op;
>    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
>    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
>    set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);
>
>    return do_sysctl(xch, &sysctl);
> }
>
> In this function, which executes in Dom0, how do I determine the cpupool_id that I need to set in the sysctl data structure?

It should take a cpupool_id argument.  It may be that you're the only
consumer of this scheduler, and that your toolstack will always pass a
cpupool_id of 0 (the pool created by default on boot).  But on the off
chance that someone decides they want to make two pools, one of which
will use your scheduler, we want them to be able to specify one. :-)

 -George
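
For reference, a minimal sketch of what the call might look like once it carries an explicit pool id (the cpupool_id field was added to xen_sysctl_scheduler_op in c/s 21626; the extra parameter, its uint32_t type, and its placement below are illustrative assumptions, not code taken from the tree):

int
xc_sched_arinc653_sched_set(
    xc_interface *xch,
    uint32_t cpupool_id,   /* illustrative: 0 = default pool created at boot */
    xen_sysctl_sched_arinc653_schedule_t *sched)
{
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_scheduler_op;
    sysctl.u.scheduler_op.cpupool_id = cpupool_id;  /* scheduler instance to adjust */
    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
    set_xen_guest_handle(sysctl.u.scheduler_op.u.arinc653.schedule, sched);

    return do_sysctl(xch, &sysctl);
}

A toolstack that does not care about CPU pools would then simply call xc_sched_arinc653_sched_set(xch, 0, sched), per Keir's note above.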


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-17  7:04                   ` Keir Fraser
  2010-06-17 18:16                     ` Kathy Hadley
  2010-06-18 17:35                     ` Kathy Hadley
@ 2010-06-22 19:10                     ` Kathy Hadley
  2010-06-22 19:16                       ` Keir Fraser
  2 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-22 19:10 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

I've migrated to changeset 21650 in xen-unstable.  Dom0 boots fine, but I am unable to start any other domains.  In particular, when I use the command:
    # xm create -c gentoo.xen [where gentoo.xen is a proven Xen configuration file]

the newly-created domain appears to hang during start-up.  I see the following output:

Using config file "/etc/xen/gentoo.xen".
Started domain gentoo (id=1)

and then nothing else.

Looking at the output to the serial channel, I see the following message (which was not output on an earlier version of Xen [xen-unstable downloaded May 27] where I can successfully start other domains):

(XEN) traps.c:2301:d2 Domain attempted WRMSR c0000080 from 0x0000080000000000 to 0x0000080000000800.

Would this explain why the new domain appears to hang?

Thanks,
  Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Thursday, June 17, 2010 3:04 AM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Juergen Gross
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 16/06/2010 19:03, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > That sounds reasonable to me.
> 
> Fixed as of changeset 21626, in the staging tree
> (http://xenbits.xensource.com/staging/xen-unstable.hg).
> 
>  K.
> 
> > Kathy
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 16, 2010 12:50 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> Oh, I see. Well, the cause is that the
> >> common/schedule.c:sched_adjust_global() is broken. But, what should
> it
> >> actually do, given that multiple schedulers of same or differing
> types
> >> may
> >> exist in a system now? Perhaps the sysctl should take a cpupool id,
> to
> >> uniquely identify the scheduler instance to be adjusted?
> >>
> >>  -- Keir
> >>
> >> On 16/06/2010 17:40, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Keir, George, et. al.,
> >>>   I definitely saw two "ops" values.  When the .init function was
> >> called, ops
> >>> = 0xFF213DC0; I then used xmalloc() to allocate memory for the
> >> scheduler data
> >>> structure and set ops->sched_data equal to the address of that
> memory
> >> block
> >>> (similar to what is done in csched_init in sched_credit.c).  When
> the
> >>> .adjust_global function was called, ops = 0xFF2112D0 and ops-
> >>> sched_data was
> >>> not equal to the address of the memory block allocated in the .init
> >> function
> >>> (it was equal to the value set when "sched_arinc653_def" was
> >> declared).
> >>>
> >>>   Regards,
> >>> Kathy
> >>>
> >>>> -----Original Message-----
> >>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>> Sent: Wednesday, June 16, 2010 12:32 PM
> >>>> To: Kathy Hadley; George Dunlap
> >>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >> (updated
> >>>> to add support for CPU pools)
> >>>>
> >>>> On 16/06/2010 17:25, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >>>> wrote:
> >>>>
> >>>>> Keir,
> >>>>>   I only saw the .init function called once.  I downloaded xen-
> >>>> unstable on May
> >>>>> 27.  Were your updates after that?
> >>>>
> >>>> My changes were done before May 27, and that ties in with you
> seeing
> >>>> .init
> >>>> called only once. That being the case, you should not see multiple
> >>>> different
> >>>> ops structures ('struct scheduler' instances). The only ops struct
> >> that
> >>>> should exist in the system in this case should be the one
> statically
> >>>> defined
> >>>> near the top of common/schedule.c.
> >>>>
> >>>>  -- Keir
> >>>>
> >>>>>   Thanks,
> >>>>> Kathy Hadley
> >>>>>
> >>>>>
> >>>>>> -----Original Message-----
> >>>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>>>> Sent: Wednesday, June 16, 2010 12:20 PM
> >>>>>> To: George Dunlap; Kathy Hadley
> >>>>>> Cc: xen-devel@lists.xensource.com; Juergen Gross
> >>>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >>>> (updated
> >>>>>> to add support for CPU pools)
> >>>>>>
> >>>>>> On 16/06/2010 17:14, "George Dunlap"
> <George.Dunlap@eu.citrix.com>
> >>>>>> wrote:
> >>>>>>
> >>>>>>>>  I actually tried the xmalloc() method first.  I found that
> when
> >>>> the
> >>>>>>>> .adjust_global function was called, the address of the "ops"
> >> data
> >>>>>> structure
> >>>>>>>> passed to that function was different from the address of the
> >>>> "ops"
> >>>>>> data
> >>>>>>>> structure when the .init function was called.  I wanted to use
> >>>>>> .adjust_global
> >>>>>>>> to modify the data structure that was created when the .init
> >>>>>> function was
> >>>>>>>> called, but I could not figure out a way to get the address of
> >> the
> >>>>>> second
> >>>>>>>> data structure.  Suggestions?
> >>>>>>>
> >>>>>>> It's been a month or two since I trawled through the cpupools
> >> code;
> >>>>>>> but I seem to recall that .init is called twice -- once for the
> >>>>>>> "default pool" (cpupool0), and once for an actually in-use
> pool.
> >>>>>>> (Juergen, can you correct me if I'm wrong?)  Is it possible
> that
> >>>>>>> that's the difference in the pointers that you're seeing?
> >>>>>>
> >>>>>> Oh yes, that was the old behaviour. I took a hatchet to the
> >>>>>> scheduler/cpupool interfaces a few weeks ago and now we should
> >> only
> >>>>>> initialise the scheduler once, unless extra cpupools are
> manually
> >>>>>> created.
> >>>>>> The fact that Kathy is seeing two different ops structures
> >> probably
> >>>>>> indicates that her xen-unstable tree is very out of date. Which
> >> may
> >>>>>> also
> >>>>>> mean that the patch will not apply to current tip.
> >>>>>>
> >>>>>>  -- Keir
> >>>>>>
> >>>>>
> >>>>
> >>>
> >>
> >
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-22 19:10                     ` Kathy Hadley
@ 2010-06-22 19:16                       ` Keir Fraser
  2010-06-23 19:57                         ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-22 19:16 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

On 22/06/2010 20:10, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Looking at the output to the serial channel, I see the following message
> (which was not output on an earlier version of Xen [xen-unstable downloaded
> May 27] where I can successfully start other domains):
> 
> (XEN) traps.c:2301:d2 Domain attempted WRMSR c0000080 from 0x0000080000000000
> to 0x0000080000000800.
> 
> Would this explain why the new domain appears to hang?

I think it might be a side effect of some MSR cleanup patches in the
hypervisor, and probably not related to your hang. You'll need to track down
where your guest kernel is hanging. Add some early printks or maybe use the
xenctx tool to get an EIP value and map that to a location in the kernel
code.

 -- Keir


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-22 19:16                       ` Keir Fraser
@ 2010-06-23 19:57                         ` Kathy Hadley
  2010-06-23 20:23                           ` Keir Fraser
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-23 19:57 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

Keir,
  Thanks for the suggestions.  When I use the xenctx tool, I see the
following:

cs:eip: 0061:c01013a7 hypercall_page+0x3a7 
flags: 00001246 i z p
ss:esp: 0069:c03fff90
eax: 00000000    ebx: deadbeef    ecx: deadbeef    edx: 00000001
esi: 00000000    edi: c0433180    ebp: 00000000
 ds:     007b     es:     007b     fs:     0000     gs:     0000
Code (instr addr c01013a7)
cc cc cc cc cc cc cc cc cc cc cc cc cc cc b8 1d 00 00 00 cd 82 <c3> cc
cc cc cc cc cc cc cc cc cc 


Stack:
 c0109005 f9d66eef 0000035d 00000000 ffffffff c0433100 c0104789 c010482d
 00000000 00000020 00000020 c04369e4 c12148e4 c1211000 c0404895 c033b1d0
 c04041d0 00000000 3fbccf00 00000e39 00008000 000038e4 c043aaa0 00020800
 c04e5000 00000000 00000000 00000000

Call Trace:
  [<c01013a7>] hypercall_page+0x3a7  <--
  [<c0109005>] raw_safe_halt+0xa5 
  [<c0104789>] xen_idle+0x49 
  [<c010482d>] cpu_idle+0x8d 
  [<c0404895>] start_kernel+0x3f5 
  [<c04041d0>] do_early_param+0x80

  Does this shed any light on the situation?

  Thanks,
Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Tuesday, June 22, 2010 3:16 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 22/06/2010 20:10, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Looking at the output to the serial channel, I see the following
> message
> > (which was not output on an earlier version of Xen [xen-unstable
> downloaded
> > May 27] where I can successfully start other domains):
> >
> > (XEN) traps.c:2301:d2 Domain attempted WRMSR c0000080 from
> 0x0000080000000000
> > to 0x0000080000000800.
> >
> > Would this explain why the new domain appears to hang?
> 
> I think it might be a side effect of some MSR cleanup patches in the
> hypervisor, and pribably not related to your hang. You'll need to
track
> down
> where your guest kernel is hanging. Add some early printks or maybe
use
> the
> xenctx tool to get an EIP value and map that to a location in the
> kernel
> code.
> 
>  -- Keir
> 


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-23 19:57                         ` Kathy Hadley
@ 2010-06-23 20:23                           ` Keir Fraser
  2010-06-23 21:16                             ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-23 20:23 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Call Trace:
>   [<c01013a7>] hypercall_page+0x3a7  <--
>   [<c0109005>] raw_safe_halt+0xa5
>   [<c0104789>] xen_idle+0x49
>   [<c010482d>] cpu_idle+0x8d
>   [<c0404895>] start_kernel+0x3f5
>   [<c04041d0>] do_early_param+0x80
> 
>   Does this shed any light on the situation?

Looks like you're in the idle loop. So, no, it doesn't really shed much
useful light.

 -- Keir


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-23 20:23                           ` Keir Fraser
@ 2010-06-23 21:16                             ` Kathy Hadley
  2010-06-23 22:36                               ` Keir Fraser
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-23 21:16 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

Keir,
  I see this same behavior when I run the credit scheduler.  It doesn't
look like it's localized to the scheduler I'm working on.  I pulled the
latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg and
rebuilt the kernel earlier today, with no effect.

  Note that I can successfully start the domain with Xen-3.4.1 and
Xen-4.0.0, using the same configuration file as I am using with
xen-unstable.

Kathy

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 23, 2010 4:23 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Call Trace:
> >   [<c01013a7>] hypercall_page+0x3a7  <--
> >   [<c0109005>] raw_safe_halt+0xa5
> >   [<c0104789>] xen_idle+0x49
> >   [<c010482d>] cpu_idle+0x8d
> >   [<c0404895>] start_kernel+0x3f5
> >   [<c04041d0>] do_early_param+0x80
> >
> >   Does this shed any light on the situation?
> 
> Looks like you're in the idle loop. So, no, it doesn't really shed
much
> useful light.
> 
>  -- Keir
> 


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-23 21:16                             ` Kathy Hadley
@ 2010-06-23 22:36                               ` Keir Fraser
  2010-06-24 12:53                                 ` Kathy Hadley
  2010-06-30 20:44                                 ` Kathy Hadley
  0 siblings, 2 replies; 35+ messages in thread
From: Keir Fraser @ 2010-06-23 22:36 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: xen-devel

I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and booted a
domU just fine. All my builds are 64-bit though whereas yours are 32-bit. I
suppose that could cause a difference (in particular, 32-bit hypervisor is
less tested by people).

 -- Keir

On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Keir,
>   I see this same behavior when I run the credit scheduler.  It doesn't
> look like it's localized to the scheduler I'm working on.  I pulled the
> latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg and
> rebuilt the kernel earlier today, with no effect.
> 
>   Note that I can successfully start the domain with Xen-3.4.1 and
> Xen-4.0.0, using the same configuration file as I am using with
> xen-unstable.
> 
> Kathy
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 23, 2010 4:23 PM
>> To: Kathy Hadley; George Dunlap
>> Cc: xen-devel@lists.xensource.com
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>> wrote:
>> 
>>> Call Trace:
>>>   [<c01013a7>] hypercall_page+0x3a7  <--
>>>   [<c0109005>] raw_safe_halt+0xa5
>>>   [<c0104789>] xen_idle+0x49
>>>   [<c010482d>] cpu_idle+0x8d
>>>   [<c0404895>] start_kernel+0x3f5
>>>   [<c04041d0>] do_early_param+0x80
>>> 
>>>   Does this shed any light on the situation?
>> 
>> Looks like you're in the idle loop. So, no, it doesn't really shed
> much
>> useful light.
>> 
>>  -- Keir
>> 
> 


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-23 22:36                               ` Keir Fraser
@ 2010-06-24 12:53                                 ` Kathy Hadley
  2010-06-24 13:08                                   ` Dan Magenheimer
  2010-06-30 20:44                                 ` Kathy Hadley
  1 sibling, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-24 12:53 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

We are using the following set-up:
  Xen-unstable changeset 21650
  Gentoo 2.6.29.6 with Xen patches for Dom0
  Linux 2.6.18-Xen for DomU (downloaded from linux-2.6.18-xen.hg)

Dom0 and DomU run fine with Xen-3.4.1 and Xen-4.0.0 (our scheduler or
the credit scheduler).  Dom0 appears to run fine with xen-unstable, but
DomU "hangs" when our scheduler or the credit scheduler (as discussed in
earlier e-mails).  "xm list" shows that DomU is blocked.

Do you have any suggestions for how I could troubleshoot this issue?
I'm still wondering about the warning I'm seeing issued from traps.c -
while it could have nothing to do with my issue, it is an interesting
coincidence.

Thanks,
  Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 23, 2010 6:36 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> booted a
> domU just fine. All my builds are 64-bit though whereas yours are 32-
> bit. I
> suppose that could cause a difference (in particular, 32-bit
hypervisor
> is
> less tested by people).
> 
>  -- Keir
> 
> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Keir,
> >   I see this same behavior when I run the credit scheduler.  It
> doesn't
> > look like it's localized to the scheduler I'm working on.  I pulled
> the
> > latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
and
> > rebuilt the kernel earlier today, with no effect.
> >
> >   Note that I can successfully start the domain with Xen-3.4.1 and
> > Xen-4.0.0, using the same configuration file as I am using with
> > xen-unstable.
> >
> > Kathy
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 23, 2010 4:23 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Call Trace:
> >>>   [<c01013a7>] hypercall_page+0x3a7  <--
> >>>   [<c0109005>] raw_safe_halt+0xa5
> >>>   [<c0104789>] xen_idle+0x49
> >>>   [<c010482d>] cpu_idle+0x8d
> >>>   [<c0404895>] start_kernel+0x3f5
> >>>   [<c04041d0>] do_early_param+0x80
> >>>
> >>>   Does this shed any light on the situation?
> >>
> >> Looks like you're in the idle loop. So, no, it doesn't really shed
> > much
> >> useful light.
> >>
> >>  -- Keir
> >>
> >
> 


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-24 12:53                                 ` Kathy Hadley
@ 2010-06-24 13:08                                   ` Dan Magenheimer
  2010-06-24 13:18                                     ` Kathy Hadley
  2010-06-24 13:23                                     ` Keir Fraser
  0 siblings, 2 replies; 35+ messages in thread
From: Dan Magenheimer @ 2010-06-24 13:08 UTC (permalink / raw)
  To: Kathy Hadley, Keir Fraser, George Dunlap; +Cc: xen-devel

Just a thought...

With all the recent tool layer changes (involving udev, xend,
bridging etc), any chance that everything in the guest
is working just fine and everything in the hypervisor
is working just fine but the connections to the console
in your distro/configuration are not playing nicely with
the recent xen-unstable tool changes, so you just can't see
that everything (else) is fine?

(if so, please support my recent rant against changes that
cause "unnecessary pain" ;-)

> -----Original Message-----
> From: Kathy Hadley [mailto:Kathy.Hadley@dornerworks.com]
> Sent: Thursday, June 24, 2010 6:54 AM
> To: Keir Fraser; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: RE: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> We are using the following set-up:
>   Xen-unstable changeset 21650
>   Gentoo 2.6.29.6 with Xen patches for Dom0
>   Linux 2.6.18-Xen for DomU (downloaded from linux-2.6.18-xen.hg)
> 
> Dom0 and DomU run fine with Xen-3.4.1 and Xen-4.0.0 (our scheduler or
> the credit scheduler).  Dom0 appears to run fine with xen-unstable, but
> DomU "hangs" when our scheduler or the credit scheduler (as discussed
> in
> earlier e-mails).  "xm list" shows that DomU is blocked.
> 
> Do you have any suggestions for how I could troubleshoot this issue?
> I'm still wondering about the warning I'm seeing issued from traps.c -
> while it could have nothing to do with my issue, it is an interesting
> coincidence.
> 
> Thanks,
>   Kathy Hadley
> 
> > -----Original Message-----
> > From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> > Sent: Wednesday, June 23, 2010 6:36 PM
> > To: Kathy Hadley; George Dunlap
> > Cc: xen-devel@lists.xensource.com
> > Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> > to add support for CPU pools)
> >
> > I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> > booted a
> > domU just fine. All my builds are 64-bit though whereas yours are 32-
> > bit. I
> > suppose that could cause a difference (in particular, 32-bit
> hypervisor
> > is
> > less tested by people).
> >
> >  -- Keir
> >
> > On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> > wrote:
> >
> > > Keir,
> > >   I see this same behavior when I run the credit scheduler.  It
> > doesn't
> > > look like it's localized to the scheduler I'm working on.  I pulled
> > the
> > > latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
> and
> > > rebuilt the kernel earlier today, with no effect.
> > >
> > >   Note that I can successfully start the domain with Xen-3.4.1 and
> > > Xen-4.0.0, using the same configuration file as I am using with
> > > xen-unstable.
> > >
> > > Kathy
> > >
> > >> -----Original Message-----
> > >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> > >> Sent: Wednesday, June 23, 2010 4:23 PM
> > >> To: Kathy Hadley; George Dunlap
> > >> Cc: xen-devel@lists.xensource.com
> > >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> > (updated
> > >> to add support for CPU pools)
> > >>
> > >> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> > >> wrote:
> > >>
> > >>> Call Trace:
> > >>>   [<c01013a7>] hypercall_page+0x3a7  <--
> > >>>   [<c0109005>] raw_safe_halt+0xa5
> > >>>   [<c0104789>] xen_idle+0x49
> > >>>   [<c010482d>] cpu_idle+0x8d
> > >>>   [<c0404895>] start_kernel+0x3f5
> > >>>   [<c04041d0>] do_early_param+0x80
> > >>>
> > >>>   Does this shed any light on the situation?
> > >>
> > >> Looks like you're in the idle loop. So, no, it doesn't really shed
> > > much
> > >> useful light.
> > >>
> > >>  -- Keir
> > >>
> > >
> >
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-24 13:08                                   ` Dan Magenheimer
@ 2010-06-24 13:18                                     ` Kathy Hadley
  2010-06-24 13:23                                     ` Keir Fraser
  1 sibling, 0 replies; 35+ messages in thread
From: Kathy Hadley @ 2010-06-24 13:18 UTC (permalink / raw)
  To: Dan Magenheimer, Keir Fraser, George Dunlap; +Cc: xen-devel

If so, it's a relatively recent development.  Things worked fine with a
clone of changeset 21632 from the xen-unstable "staging" repository
(pulled June 17).  Things went south after I pulled changeset 21650 from
the xen-unstable repository (pulled June 22).  But, then again, that's
when xend stopped being started automatically.

I really don't think that everything else is "fine" because I can't
connect to Dom1 (using "xm console" or ssh), and I never get a Dom1
prompt.

Thanks for the thought,
  Kathy

> -----Original Message-----
> From: Dan Magenheimer [mailto:dan.magenheimer@oracle.com]
> Sent: Thursday, June 24, 2010 9:08 AM
> To: Kathy Hadley; Keir Fraser; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: RE: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> Just a thought...
> 
> With all the recent tool layer changes (involving udev, xend,
> bridging etc), any chance that everything in the guest
> is working just fine and everything in the hypervisor
> is working just fine but the connections to the console
> in your distro/configuration are not playing nicely with
> the recent xen-unstable tool changes, so you just can't see
> that everything (else) is fine?
> 
> (if so, please support my recent rant against changes that
> cause "unnecessary pain" ;-)
> 
> > -----Original Message-----
> > From: Kathy Hadley [mailto:Kathy.Hadley@dornerworks.com]
> > Sent: Thursday, June 24, 2010 6:54 AM
> > To: Keir Fraser; George Dunlap
> > Cc: xen-devel@lists.xensource.com
> > Subject: RE: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
(updated
> > to add support for CPU pools)
> >
> > We are using the following set-up:
> >   Xen-unstable changeset 21650
> >   Gentoo 2.6.29.6 with Xen patches for Dom0
> >   Linux 2.6.18-Xen for DomU (downloaded from linux-2.6.18-xen.hg)
> >
> > Dom0 and DomU run fine with Xen-3.4.1 and Xen-4.0.0 (our scheduler
or
> > the credit scheduler).  Dom0 appears to run fine with xen-unstable,
> but
> > DomU "hangs" when our scheduler or the credit scheduler (as
discussed
> > in
> > earlier e-mails).  "xm list" shows that DomU is blocked.
> >
> > Do you have any suggestions for how I could troubleshoot this issue?
> > I'm still wondering about the warning I'm seeing issued from traps.c
> -
> > while it could have nothing to do with my issue, it is an
interesting
> > coincidence.
> >
> > Thanks,
> >   Kathy Hadley
> >
> > > -----Original Message-----
> > > From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> > > Sent: Wednesday, June 23, 2010 6:36 PM
> > > To: Kathy Hadley; George Dunlap
> > > Cc: xen-devel@lists.xensource.com
> > > Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> > > to add support for CPU pools)
> > >
> > > I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> > > booted a
> > > domU just fine. All my builds are 64-bit though whereas yours are
> 32-
> > > bit. I
> > > suppose that could cause a difference (in particular, 32-bit
> > hypervisor
> > > is
> > > less tested by people).
> > >
> > >  -- Keir
> > >
> > > On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> > > wrote:
> > >
> > > > Keir,
> > > >   I see this same behavior when I run the credit scheduler.  It
> > > doesn't
> > > > look like it's localized to the scheduler I'm working on.  I
> pulled
> > > the
> > > > latest code from
http://xenbits.xensource.com/linux-2.6.18-xen.hg
> > and
> > > > rebuilt the kernel earlier today, with no effect.
> > > >
> > > >   Note that I can successfully start the domain with Xen-3.4.1
> and
> > > > Xen-4.0.0, using the same configuration file as I am using with
> > > > xen-unstable.
> > > >
> > > > Kathy
> > > >
> > > >> -----Original Message-----
> > > >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> > > >> Sent: Wednesday, June 23, 2010 4:23 PM
> > > >> To: Kathy Hadley; George Dunlap
> > > >> Cc: xen-devel@lists.xensource.com
> > > >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> > > (updated
> > > >> to add support for CPU pools)
> > > >>
> > > >> On 23/06/2010 20:57, "Kathy Hadley"
> <Kathy.Hadley@dornerworks.com>
> > > >> wrote:
> > > >>
> > > >>> Call Trace:
> > > >>>   [<c01013a7>] hypercall_page+0x3a7  <--
> > > >>>   [<c0109005>] raw_safe_halt+0xa5
> > > >>>   [<c0104789>] xen_idle+0x49
> > > >>>   [<c010482d>] cpu_idle+0x8d
> > > >>>   [<c0404895>] start_kernel+0x3f5
> > > >>>   [<c04041d0>] do_early_param+0x80
> > > >>>
> > > >>>   Does this shed any light on the situation?
> > > >>
> > > >> Looks like you're in the idle loop. So, no, it doesn't really
> shed
> > > > much
> > > >> useful light.
> > > >>
> > > >>  -- Keir
> > > >>
> > > >
> > >
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xensource.com
> > http://lists.xensource.com/xen-devel


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-24 13:08                                   ` Dan Magenheimer
  2010-06-24 13:18                                     ` Kathy Hadley
@ 2010-06-24 13:23                                     ` Keir Fraser
  2010-06-24 13:32                                       ` Kathy Hadley
  1 sibling, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-24 13:23 UTC (permalink / raw)
  To: Dan Magenheimer, Kathy Hadley, George Dunlap; +Cc: xen-devel

Yes, one possibility here is that somehow you do not have xenconsoled
running. You should 'ps auxw' in dom0 and check that xenstored and
xenconsoled are both running.

I now start xend with a little 'xenstored; xenconsoled; xend start' script.
:-)

 -- Keir

On 24/06/2010 14:08, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:

> Just a thought...
> 
> With all the recent tool layer changes (involving udev, xend,
> bridging etc), any chance that everything in the guest
> is working just fine and everything in the hypervisor
> is working just fine but the connections to the console
> in your distro/configuration are not playing nicely with
> the recent xen-unstable tool changes, so you just can't see
> that everything (else) is fine?
> 
> (if so, please support my recent rant against changes that
> cause "unnecessary pain" ;-)
> 
>> -----Original Message-----
>> From: Kathy Hadley [mailto:Kathy.Hadley@dornerworks.com]
>> Sent: Thursday, June 24, 2010 6:54 AM
>> To: Keir Fraser; George Dunlap
>> Cc: xen-devel@lists.xensource.com
>> Subject: RE: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> We are using the following set-up:
>>   Xen-unstable changeset 21650
>>   Gentoo 2.6.29.6 with Xen patches for Dom0
>>   Linux 2.6.18-Xen for DomU (downloaded from linux-2.6.18-xen.hg)
>> 
>> Dom0 and DomU run fine with Xen-3.4.1 and Xen-4.0.0 (our scheduler or
>> the credit scheduler).  Dom0 appears to run fine with xen-unstable, but
>> DomU "hangs" when our scheduler or the credit scheduler (as discussed
>> in
>> earlier e-mails).  "xm list" shows that DomU is blocked.
>> 
>> Do you have any suggestions for how I could troubleshoot this issue?
>> I'm still wondering about the warning I'm seeing issued from traps.c -
>> while it could have nothing to do with my issue, it is an interesting
>> coincidence.
>> 
>> Thanks,
>>   Kathy Hadley
>> 
>>> -----Original Message-----
>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>> Sent: Wednesday, June 23, 2010 6:36 PM
>>> To: Kathy Hadley; George Dunlap
>>> Cc: xen-devel@lists.xensource.com
>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>>> to add support for CPU pools)
>>> 
>>> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
>>> booted a
>>> domU just fine. All my builds are 64-bit though whereas yours are 32-
>>> bit. I
>>> suppose that could cause a difference (in particular, 32-bit
>> hypervisor
>>> is
>>> less tested by people).
>>> 
>>>  -- Keir
>>> 
>>> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>> wrote:
>>> 
>>>> Keir,
>>>>   I see this same behavior when I run the credit scheduler.  It
>>> doesn't
>>>> look like it's localized to the scheduler I'm working on.  I pulled
>>> the
>>>> latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
>> and
>>>> rebuilt the kernel earlier today, with no effect.
>>>> 
>>>>   Note that I can successfully start the domain with Xen-3.4.1 and
>>>> Xen-4.0.0, using the same configuration file as I am using with
>>>> xen-unstable.
>>>> 
>>>> Kathy
>>>> 
>>>>> -----Original Message-----
>>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>>> Sent: Wednesday, June 23, 2010 4:23 PM
>>>>> To: Kathy Hadley; George Dunlap
>>>>> Cc: xen-devel@lists.xensource.com
>>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
>>> (updated
>>>>> to add support for CPU pools)
>>>>> 
>>>>> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>>>> wrote:
>>>>> 
>>>>>> Call Trace:
>>>>>>   [<c01013a7>] hypercall_page+0x3a7  <--
>>>>>>   [<c0109005>] raw_safe_halt+0xa5
>>>>>>   [<c0104789>] xen_idle+0x49
>>>>>>   [<c010482d>] cpu_idle+0x8d
>>>>>>   [<c0404895>] start_kernel+0x3f5
>>>>>>   [<c04041d0>] do_early_param+0x80
>>>>>> 
>>>>>>   Does this shed any light on the situation?
>>>>> 
>>>>> Looks like you're in the idle loop. So, no, it doesn't really shed
>>>> much
>>>>> useful light.
>>>>> 
>>>>>  -- Keir
>>>>> 
>>>> 
>>> 
>> 
>> 
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xensource.com
>> http://lists.xensource.com/xen-devel


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-24 13:23                                     ` Keir Fraser
@ 2010-06-24 13:32                                       ` Kathy Hadley
  0 siblings, 0 replies; 35+ messages in thread
From: Kathy Hadley @ 2010-06-24 13:32 UTC (permalink / raw)
  To: Keir Fraser, Dan Magenheimer, George Dunlap; +Cc: xen-devel

Looks like xenconsoled is running, see output:

# ps auxw | grep xen
root        10  0.0  0.0      0     0 ?        S<   09:19   0:00
[xenwatch]
root        11  0.0  0.0      0     0 ?        S<   09:19   0:00
[xenbus]
root      3559  0.1  0.0   2104   968 ?        S    09:21   0:00
xenstored
root      3563  0.0  0.0  10200   644 ?        SLl  09:21   0:00
xenconsoled
root      3572  0.0  0.6  12940  7224 ?        S    09:21   0:00
/usr/bin/python /usr/sbin/xend start
root      3573  1.2  1.1  89980 12068 ?        SLl  09:21   0:00
/usr/bin/python /usr/sbin/xend start
root      3839  0.1  0.0  10148   640 pts/3    Sl+  09:21   0:00
/usr/lib/xen/bin/xenconsole 1 --num 0
root      3853  0.0  0.3  28212  3456 ?        Sl   09:21   0:00
/usr/lib/xen/bin/qemu-dm -d 1 -serial pty -domain-name gentoo -videoram
4 -vnc 127.0.0.1:0 -vncunused -M xenpv
root      3964  0.0  0.0   3232   868 pts/2    S+   09:22   0:00 grep
--color=auto xen

Like you, I run "xenstored; xenconsoled; xend start" each time I start
up.

Thanks,
  Kathy

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Thursday, June 24, 2010 9:23 AM
> To: Dan Magenheimer; Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> Yes, one possibility here is that somehow you do not have xenconsoled
> running. You should 'ps auxw' in dom0 and check that xenstored and
> xenconsoled are both running.
> 
> I now start xend with a little 'xenstored; xenconsoled; xend start'
> script.
> :-)
> 
>  -- Keir
> 
> On 24/06/2010 14:08, "Dan Magenheimer" <dan.magenheimer@oracle.com>
> wrote:
> 
> > Just a thought...
> >
> > With all the recent tool layer changes (involving udev, xend,
> > bridging etc), any chance that everything in the guest
> > is working just fine and everything in the hypervisor
> > is working just fine but the connections to the console
> > in your distro/configuration are not playing nicely with
> > the recent xen-unstable tool changes, so you just can't see
> > that everything (else) is fine?
> >
> > (if so, please support my recent rant against changes that
> > cause "unnecessary pain" ;-)
> >
> >> -----Original Message-----
> >> From: Kathy Hadley [mailto:Kathy.Hadley@dornerworks.com]
> >> Sent: Thursday, June 24, 2010 6:54 AM
> >> To: Keir Fraser; George Dunlap
> >> Cc: xen-devel@lists.xensource.com
> >> Subject: RE: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> We are using the following set-up:
> >>   Xen-unstable changeset 21650
> >>   Gentoo 2.6.29.6 with Xen patches for Dom0
> >>   Linux 2.6.18-Xen for DomU (downloaded from linux-2.6.18-xen.hg)
> >>
> >> Dom0 and DomU run fine with Xen-3.4.1 and Xen-4.0.0 (our scheduler
> or
> >> the credit scheduler).  Dom0 appears to run fine with xen-unstable,
> but
> >> DomU "hangs" when our scheduler or the credit scheduler (as
> discussed
> >> in
> >> earlier e-mails).  "xm list" shows that DomU is blocked.
> >>
> >> Do you have any suggestions for how I could troubleshoot this
issue?
> >> I'm still wondering about the warning I'm seeing issued from
traps.c
> -
> >> while it could have nothing to do with my issue, it is an
> interesting
> >> coincidence.
> >>
> >> Thanks,
> >>   Kathy Hadley
> >>
> >>> -----Original Message-----
> >>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>> Sent: Wednesday, June 23, 2010 6:36 PM
> >>> To: Kathy Hadley; George Dunlap
> >>> Cc: xen-devel@lists.xensource.com
> >>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >>> to add support for CPU pools)
> >>>
> >>> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> >>> booted a
> >>> domU just fine. All my builds are 64-bit though whereas yours are
> 32-
> >>> bit. I
> >>> suppose that could cause a difference (in particular, 32-bit
> >> hypervisor
> >>> is
> >>> less tested by people).
> >>>
> >>>  -- Keir
> >>>
> >>> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >>> wrote:
> >>>
> >>>> Keir,
> >>>>   I see this same behavior when I run the credit scheduler.  It
> >>> doesn't
> >>>> look like it's localized to the scheduler I'm working on.  I
> pulled
> >>> the
> >>>> latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
> >> and
> >>>> rebuilt the kernel earlier today, with no effect.
> >>>>
> >>>>   Note that I can successfully start the domain with Xen-3.4.1
and
> >>>> Xen-4.0.0, using the same configuration file as I am using with
> >>>> xen-unstable.
> >>>>
> >>>> Kathy
> >>>>
> >>>>> -----Original Message-----
> >>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>>> Sent: Wednesday, June 23, 2010 4:23 PM
> >>>>> To: Kathy Hadley; George Dunlap
> >>>>> Cc: xen-devel@lists.xensource.com
> >>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> >>> (updated
> >>>>> to add support for CPU pools)
> >>>>>
> >>>>> On 23/06/2010 20:57, "Kathy Hadley"
> <Kathy.Hadley@dornerworks.com>
> >>>>> wrote:
> >>>>>
> >>>>>> Call Trace:
> >>>>>>   [<c01013a7>] hypercall_page+0x3a7  <--
> >>>>>>   [<c0109005>] raw_safe_halt+0xa5
> >>>>>>   [<c0104789>] xen_idle+0x49
> >>>>>>   [<c010482d>] cpu_idle+0x8d
> >>>>>>   [<c0404895>] start_kernel+0x3f5
> >>>>>>   [<c04041d0>] do_early_param+0x80
> >>>>>>
> >>>>>>   Does this shed any light on the situation?
> >>>>>
> >>>>> Looks like you're in the idle loop. So, no, it doesn't really
> shed
> >>>> much
> >>>>> useful light.
> >>>>>
> >>>>>  -- Keir
> >>>>>
> >>>>
> >>>
> >>
> >>
> >> _______________________________________________
> >> Xen-devel mailing list
> >> Xen-devel@lists.xensource.com
> >> http://lists.xensource.com/xen-devel
> 


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-23 22:36                               ` Keir Fraser
  2010-06-24 12:53                                 ` Kathy Hadley
@ 2010-06-30 20:44                                 ` Kathy Hadley
  2010-06-30 20:54                                   ` Keir Fraser
  1 sibling, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-06-30 20:44 UTC (permalink / raw)
  To: Keir Fraser, George Dunlap; +Cc: xen-devel

Good afternoon,
  We have determined that changeset 21507 introduced the error that
prevents unprivileged domains from running on our machine using the
32-bit hypervisor.

  With changeset 21506, we are able to run unprivileged domains using
the credit scheduler.  We cannot do so with changeset 21507 (or
subsequent changesets) -- the unprivileged domains appear to be stuck in
an idle loop (as indicated by the call trace below).

  I'd appreciate help addressing this issue.

  Thanks,
Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 23, 2010 6:36 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> booted a
> domU just fine. All my builds are 64-bit though whereas yours are 32-
> bit. I
> suppose that could cause a difference (in particular, 32-bit
hypervisor
> is
> less tested by people).
> 
>  -- Keir
> 
> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Keir,
> >   I see this same behavior when I run the credit scheduler.  It
> doesn't
> > look like it's localized to the scheduler I'm working on.  I pulled
> the
> > latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
and
> > rebuilt the kernel earlier today, with no effect.
> >
> >   Note that I can successfully start the domain with Xen-3.4.1 and
> > Xen-4.0.0, using the same configuration file as I am using with
> > xen-unstable.
> >
> > Kathy
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 23, 2010 4:23 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
> (updated
> >> to add support for CPU pools)
> >>
> >> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Call Trace:
> >>>   [<c01013a7>] hypercall_page+0x3a7  <--
> >>>   [<c0109005>] raw_safe_halt+0xa5
> >>>   [<c0104789>] xen_idle+0x49
> >>>   [<c010482d>] cpu_idle+0x8d
> >>>   [<c0404895>] start_kernel+0x3f5
> >>>   [<c04041d0>] do_early_param+0x80
> >>>
> >>>   Does this shed any light on the situation?
> >>
> >> Looks like you're in the idle loop. So, no, it doesn't really shed
> > much
> >> useful light.
> >>
> >>  -- Keir
> >>
> >
> 


* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-30 20:44                                 ` Kathy Hadley
@ 2010-06-30 20:54                                   ` Keir Fraser
  2010-07-14 17:32                                     ` Kathy Hadley
  0 siblings, 1 reply; 35+ messages in thread
From: Keir Fraser @ 2010-06-30 20:54 UTC (permalink / raw)
  To: Kathy Hadley, George Dunlap; +Cc: Ian Campbell, xen-devel

On 30/06/2010 21:44, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> Good afternoon,
>   We have determined that changeset 21507 introduced the error that
> prevents unprivileged domains from running on our machine using the
> 32-bit hypervisor.
> 
>   With changeset 21506, we are able to run unprivileged domains using
> the credit scheduler.  We cannot do so with changeset 21507 (or
> subsequent changesets) -- the unprivileged domains appear to be stuck in
> an idle loop (as indicated by the call trace below).
> 
>   I'd appreciate help addressing this issue.

The tools no longer automatically create /dev/xen/evtchn and expect it to
already be created by the distro (e.g., via a udev rule). My guess would be
that you are missing /dev/xen/evtchn. Ccing the patch author.

 -- Keir

>   Thanks,
> Kathy Hadley
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 23, 2010 6:36 PM
>> To: Kathy Hadley; George Dunlap
>> Cc: xen-devel@lists.xensource.com
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
>> booted a
>> domU just fine. All my builds are 64-bit though whereas yours are 32-
>> bit. I
>> suppose that could cause a difference (in particular, 32-bit
> hypervisor
>> is
>> less tested by people).
>> 
>>  -- Keir
>> 
>> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>> wrote:
>> 
>>> Keir,
>>>   I see this same behavior when I run the credit scheduler.  It
>> doesn't
>>> look like it's localized to the scheduler I'm working on.  I pulled
>> the
>>> latest code from http://xenbits.xensource.com/linux-2.6.18-xen.hg
> and
>>> rebuilt the kernel earlier today, with no effect.
>>> 
>>>   Note that I can successfully start the domain with Xen-3.4.1 and
>>> Xen-4.0.0, using the same configuration file as I am using with
>>> xen-unstable.
>>> 
>>> Kathy
>>> 
>>>> -----Original Message-----
>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>> Sent: Wednesday, June 23, 2010 4:23 PM
>>>> To: Kathy Hadley; George Dunlap
>>>> Cc: xen-devel@lists.xensource.com
>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler
>> (updated
>>>> to add support for CPU pools)
>>>> 
>>>> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>>> wrote:
>>>> 
>>>>> Call Trace:
>>>>>   [<c01013a7>] hypercall_page+0x3a7  <--
>>>>>   [<c0109005>] raw_safe_halt+0xa5
>>>>>   [<c0104789>] xen_idle+0x49
>>>>>   [<c010482d>] cpu_idle+0x8d
>>>>>   [<c0404895>] start_kernel+0x3f5
>>>>>   [<c04041d0>] do_early_param+0x80
>>>>> 
>>>>>   Does this shed any light on the situation?
>>>> 
>>>> Looks like you're in the idle loop. So, no, it doesn't really shed
>>> much
>>>> useful light.
>>>> 
>>>>  -- Keir
>>>> 
>>> 
>> 
> 


* RE: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-06-30 20:54                                   ` Keir Fraser
@ 2010-07-14 17:32                                     ` Kathy Hadley
  2010-07-14 18:04                                       ` Keir Fraser
  0 siblings, 1 reply; 35+ messages in thread
From: Kathy Hadley @ 2010-07-14 17:32 UTC (permalink / raw)
  To: Ian Campbell; +Cc: George Dunlap, xen-devel, Keir Fraser

We've tried the latest changesets in xen-unstable and the xen-unstable
staging area, and still have the same issue.  To re-cap, we are using
the 32-bit hypervisor with the credit scheduler.  Dom0 appears to boot
successfully, but when we attempt to start unprivileged domains they
hang and appear to be stuck in an idle loop.

This appears to have been introduced in changeset 21507.

I'd appreciate suggestions for how to fix this issue.

Thank you,
  Kathy Hadley

> -----Original Message-----
> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> Sent: Wednesday, June 30, 2010 4:54 PM
> To: Kathy Hadley; George Dunlap
> Cc: xen-devel@lists.xensource.com; Ian Campbell
> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> to add support for CPU pools)
> 
> On 30/06/2010 21:44, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> wrote:
> 
> > Good afternoon,
> >   We have determined that changeset 21507 introduced the error that
> > prevents unprivileged domains from running on our machine using the
> > 32-bit hypervisor.
> >
> >   With changeset 21506, we are able to run unprivileged domains using
> > the credit scheduler.  We cannot do so with changeset 21507 (or
> > subsequent changesets) -- the unprivileged domains appear to be stuck
> > in an idle loop (as indicated by the call trace below).
> >
> >   I'd appreciate help addressing this issue.
> 
> The tools no longer automatically create /dev/xen/evtchn and expect it
> to already be created by the distro (e.g., via a udev rule). My guess
> would be that you are missing /dev/xen/evtchn. Ccing the patch author.
> 
>  -- Keir
> 
> >   Thanks,
> > Kathy Hadley
> >
> >> -----Original Message-----
> >> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >> Sent: Wednesday, June 23, 2010 6:36 PM
> >> To: Kathy Hadley; George Dunlap
> >> Cc: xen-devel@lists.xensource.com
> >> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> >> to add support for CPU pools)
> >>
> >> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
> >> booted a domU just fine. All my builds are 64-bit though whereas
> >> yours are 32-bit. I suppose that could cause a difference (in
> >> particular, 32-bit hypervisor is less tested by people).
> >>
> >>  -- Keir
> >>
> >> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >> wrote:
> >>
> >>> Keir,
> >>>   I see this same behavior when I run the credit scheduler.  It
> >>> doesn't look like it's localized to the scheduler I'm working on.  I
> >>> pulled the latest code from
> >>> http://xenbits.xensource.com/linux-2.6.18-xen.hg and rebuilt the
> >>> kernel earlier today, with no effect.
> >>>
> >>>   Note that I can successfully start the domain with Xen-3.4.1 and
> >>> Xen-4.0.0, using the same configuration file as I am using with
> >>> xen-unstable.
> >>>
> >>> Kathy
> >>>
> >>>> -----Original Message-----
> >>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
> >>>> Sent: Wednesday, June 23, 2010 4:23 PM
> >>>> To: Kathy Hadley; George Dunlap
> >>>> Cc: xen-devel@lists.xensource.com
> >>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
> >>>> to add support for CPU pools)
> >>>>
> >>>> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
> >>>> wrote:
> >>>>
> >>>>> Call Trace:
> >>>>>   [<c01013a7>] hypercall_page+0x3a7  <--
> >>>>>   [<c0109005>] raw_safe_halt+0xa5
> >>>>>   [<c0104789>] xen_idle+0x49
> >>>>>   [<c010482d>] cpu_idle+0x8d
> >>>>>   [<c0404895>] start_kernel+0x3f5
> >>>>>   [<c04041d0>] do_early_param+0x80
> >>>>>
> >>>>>   Does this shed any light on the situation?
> >>>>
> >>>> Looks like you're in the idle loop. So, no, it doesn't really shed
> >>>> much useful light.
> >>>>
> >>>>  -- Keir
> >>>>
> >>>
> >>
> >
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools)
  2010-07-14 17:32                                     ` Kathy Hadley
@ 2010-07-14 18:04                                       ` Keir Fraser
  0 siblings, 0 replies; 35+ messages in thread
From: Keir Fraser @ 2010-07-14 18:04 UTC (permalink / raw)
  To: Kathy Hadley, Ian Campbell; +Cc: George Dunlap, xen-devel

The only thing that patch changes is that /dev/xen/evtchn and
/dev/xen/gntdev no longer get auto-created by the toolstack. So just make
sure you have those device nodes before starting the Xen tools. They ought
to be created by udev really but you could try mknod'ing them instead. You
can find the minor numbers in /proc/misc and then run, for example, 'mknod
/dev/xen/evtchn c 10 <minor_from_proc_misc>'.
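
A minimal sketch of doing that by hand (this assumes the drivers show up
in /proc/misc under names containing "evtchn" and "gntdev"; adjust the
patterns to whatever names your kernel actually registers):

    # Sketch only: create the Xen device nodes manually when udev has not.
    # The misc-device major is 10; the minor numbers are read from /proc/misc.
    mkdir -p /dev/xen
    mknod /dev/xen/evtchn c 10 $(awk '/evtchn/ {print $1; exit}' /proc/misc)
    mknod /dev/xen/gntdev c 10 $(awk '/gntdev/ {print $1; exit}' /proc/misc)

A udev rule that names the nodes under /dev/xen is the cleaner long-term
fix, but the exact rule depends on the device names your kernel exposes.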

 -- Keir

On 14/07/2010 18:32, "Kathy Hadley" <Kathy.Hadley@dornerworks.com> wrote:

> We've tried the latest changesets in xen-unstable and the xen-unstable
> staging area, and still have the same issue.  To re-cap, we are using
> the 32-bit hypervisor with the credit scheduler.  Dom0 appears to boot
> successfully, but when we attempt to start unprivileged domains they
> hang and appear to be stuck in an idle loop.
> 
> This appears to have been introduced in changeset 21507.
> 
> I'd appreciate suggestions for how to fix this issue.
> 
> Thank you,
>   Kathy Hadley
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>> Sent: Wednesday, June 30, 2010 4:54 PM
>> To: Kathy Hadley; George Dunlap
>> Cc: xen-devel@lists.xensource.com; Ian Campbell
>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>> to add support for CPU pools)
>> 
>> On 30/06/2010 21:44, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>> wrote:
>> 
>>> Good afternoon,
>>>   We have determined that changeset 21507 introduced the error that
>>> prevents unprivileged domains from running on our machine using the
>>> 32-bit hypervisor.
>>> 
>>>   With changeset 21506, we are able to run unprivileged domains using
>>> the credit scheduler.  We cannot do so with changeset 21507 (or
>>> subsequent changesets) -- the unprivileged domains appear to be stuck
>>> in an idle loop (as indicated by the call trace below).
>>> 
>>>   I'd appreciate help addressing this issue.
>> 
>> The tools no longer automatically create /dev/xen/evtchn and expect it
>> to already be created by the distro (e.g., via a udev rule). My guess
>> would be that you are missing /dev/xen/evtchn. Ccing the patch author.
>> 
>>  -- Keir
>> 
>>>   Thanks,
>>> Kathy Hadley
>>> 
>>>> -----Original Message-----
>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>> Sent: Wednesday, June 23, 2010 6:36 PM
>>>> To: Kathy Hadley; George Dunlap
>>>> Cc: xen-devel@lists.xensource.com
>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>>>> to add support for CPU pools)
>>>> 
>>>> I've just built latest xen-unstable.hg and linux-2.6.18-xen.hg and
>>>> booted a domU just fine. All my builds are 64-bit though whereas
>>>> yours are 32-bit. I suppose that could cause a difference (in
>>>> particular, 32-bit hypervisor is less tested by people).
>>>> 
>>>>  -- Keir
>>>> 
>>>> On 23/06/2010 22:16, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>>> wrote:
>>>> 
>>>>> Keir,
>>>>>   I see this same behavior when I run the credit scheduler.  It
>>>>> doesn't look like it's localized to the scheduler I'm working on.  I
>>>>> pulled the latest code from
>>>>> http://xenbits.xensource.com/linux-2.6.18-xen.hg and rebuilt the
>>>>> kernel earlier today, with no effect.
>>>>> 
>>>>>   Note that I can successfully start the domain with Xen-3.4.1 and
>>>>> Xen-4.0.0, using the same configuration file as I am using with
>>>>> xen-unstable.
>>>>> 
>>>>> Kathy
>>>>> 
>>>>>> -----Original Message-----
>>>>>> From: Keir Fraser [mailto:keir.fraser@eu.citrix.com]
>>>>>> Sent: Wednesday, June 23, 2010 4:23 PM
>>>>>> To: Kathy Hadley; George Dunlap
>>>>>> Cc: xen-devel@lists.xensource.com
>>>>>> Subject: Re: [Xen-devel] [PATCH 1/1] Xen ARINC 653 Scheduler (updated
>>>>>> to add support for CPU pools)
>>>>>> 
>>>>>> On 23/06/2010 20:57, "Kathy Hadley" <Kathy.Hadley@dornerworks.com>
>>>>>> wrote:
>>>>>> 
>>>>>>> Call Trace:
>>>>>>>   [<c01013a7>] hypercall_page+0x3a7  <--
>>>>>>>   [<c0109005>] raw_safe_halt+0xa5
>>>>>>>   [<c0104789>] xen_idle+0x49
>>>>>>>   [<c010482d>] cpu_idle+0x8d
>>>>>>>   [<c0404895>] start_kernel+0x3f5
>>>>>>>   [<c04041d0>] do_early_param+0x80
>>>>>>> 
>>>>>>>   Does this shed any light on the situation?
>>>>>> 
>>>>>> Looks like you're in the idle loop. So, no, it doesn't really shed
>>>>>> much useful light.
>>>>>> 
>>>>>>  -- Keir
>>>>>> 
>>>>> 
>>>> 
>>> 
>> 
> 

^ permalink raw reply	[flat|nested] 35+ messages in thread

end of thread, other threads:[~2010-07-14 18:04 UTC | newest]

Thread overview: 35+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-06-16 15:04 [PATCH 1/1] Xen ARINC 653 Scheduler (updated to add support for CPU pools) Kathy Hadley
2010-06-16 15:50 ` George Dunlap
2010-06-16 16:00   ` Kathy Hadley
2010-06-16 16:13     ` Keir Fraser
2010-06-16 16:14     ` George Dunlap
2010-06-16 16:20       ` Keir Fraser
2010-06-16 16:25         ` Kathy Hadley
2010-06-16 16:31           ` Keir Fraser
2010-06-16 16:40             ` Kathy Hadley
2010-06-16 16:49               ` Keir Fraser
2010-06-16 18:03                 ` Kathy Hadley
2010-06-17  7:04                   ` Keir Fraser
2010-06-17 18:16                     ` Kathy Hadley
2010-06-17 18:26                       ` Keir Fraser
2010-06-18 17:35                     ` Kathy Hadley
2010-06-18 17:49                       ` Keir Fraser
2010-06-19 11:14                       ` George Dunlap
2010-06-22 19:10                     ` Kathy Hadley
2010-06-22 19:16                       ` Keir Fraser
2010-06-23 19:57                         ` Kathy Hadley
2010-06-23 20:23                           ` Keir Fraser
2010-06-23 21:16                             ` Kathy Hadley
2010-06-23 22:36                               ` Keir Fraser
2010-06-24 12:53                                 ` Kathy Hadley
2010-06-24 13:08                                   ` Dan Magenheimer
2010-06-24 13:18                                     ` Kathy Hadley
2010-06-24 13:23                                     ` Keir Fraser
2010-06-24 13:32                                       ` Kathy Hadley
2010-06-30 20:44                                 ` Kathy Hadley
2010-06-30 20:54                                   ` Keir Fraser
2010-07-14 17:32                                     ` Kathy Hadley
2010-07-14 18:04                                       ` Keir Fraser
2010-06-16 16:25         ` George Dunlap
2010-06-17  5:02         ` Juergen Gross
2010-06-17  6:09           ` Keir Fraser
