All of lore.kernel.org
 help / color / mirror / Atom feed
* [Patch][resend] implementation of cpupool support in xl
@ 2010-09-15  7:26 Juergen Gross
  2010-09-15  8:29 ` Ian Campbell
                   ` (2 more replies)
  0 siblings, 3 replies; 17+ messages in thread
From: Juergen Gross @ 2010-09-15  7:26 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 479 bytes --]

Hi,

attached patch implements cpupool support in xl. Rewrite of previous patch
after rework of libxl.


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

[-- Attachment #2: xl-pools.patch --]
[-- Type: text/x-patch, Size: 43183 bytes --]

Signed-off-by: juergen.gross@ts.fujitsu.com

diff -r 3985fea87987 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxc/xc_cpupool.c	Wed Sep 15 09:19:02 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int cpumap_size = 0;
+    xc_physinfo_t physinfo;
+
+    if ( cpumap_size )
+        return cpumap_size;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+    return cpumap_size;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +78,56 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t first_poolid,
-                       uint32_t n_max, 
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    cpumap_size = (local_size + 7) / 8;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@ int xc_cpupool_movedomain(xc_interface *
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
     DECLARE_SYSCTL;
+
+    local = malloc(cpusize);
+    if (local == NULL)
+        return -ENOMEM;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    free(local);
 
     if (err < 0)
         return err;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
     return 0;
 }
diff -r 3985fea87987 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxc/xenctrl.h	Wed Sep 15 09:19:02 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
  */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
  * return 0 on success, -1 on failure
  */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r 3985fea87987 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.c	Wed Sep 15 09:19:02 2010 +0200
@@ -594,26 +594,46 @@ libxl_poolinfo * libxl_list_pool(libxl_c
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
+    int i, m;
+    xc_cpupoolinfo_t *info;
+    int size;
+    uint32_t poolid;
+    libxl_physinfo physinfo;
+    int mapsize;
+    uint64_t *cpumap;
 
-    ptr = calloc(size, sizeof(libxl_poolinfo));
+    if (libxl_get_physinfo(ctx, &physinfo) != 0) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting phys info");
+        return NULL;
+    }
+    mapsize = (physinfo.max_cpu_id + 64) / 64;
+    size = physinfo.max_cpu_id + 32;
+
+    ptr = calloc(size, sizeof(libxl_poolinfo) + mapsize * sizeof(*cpumap));
     if (!ptr) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
         return NULL;
     }
+    cpumap = (uint64_t *)(ptr + size);
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
+    poolid = 0;
+    for (i = 0; i < size; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        ptr[i].poolid = info->cpupool_id;
+        ptr[i].sched_id = info->sched_id;
+        ptr[i].n_dom = info->n_dom;
+        ptr[i].cpumap_size = info->cpumap_size;
+        ptr[i].cpumap = cpumap;
+        for (m = 0; m < mapsize; m++)
+            cpumap[m] = info->cpumap[m];
+        cpumap += mapsize;
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
 
@@ -3340,3 +3360,182 @@ void libxl_file_reference_destroy(libxl_
     libxl__file_reference_unmap(f);
     free(f->path);
 }
+
+int libxl_get_freecpus(libxl_ctx *ctx, int *n_cpus, uint64_t **cpumap)
+{
+    libxl_physinfo info;
+    int mapsize;
+
+    if (libxl_get_physinfo(ctx, &info) != 0)
+        return ERROR_FAIL;
+
+    mapsize = (info.max_cpu_id + 64) / 64;
+    *cpumap = calloc(mapsize, sizeof(**cpumap));
+    if (!*cpumap)
+        return ERROR_NOMEM;
+
+    if (xc_cpupool_freeinfo(ctx->xch, *cpumap, (info.max_cpu_id + 8) / 8)) {
+        free(*cpumap);
+        *cpumap = NULL;
+        return ERROR_FAIL;
+    }
+
+    *n_cpus = info.max_cpu_id + 1;
+    return 0;
+}
+
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         uint64_t *cpumap, int n_cpus, libxl_uuid *uuid,
+                         uint32_t *poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    int i;
+    xs_transaction_t t;
+    char *uuid_string;
+
+    uuid_string = libxl__uuid2string(&gc, *uuid);
+    if (!uuid_string)
+        return ERROR_NOMEM;
+
+    rc = xc_cpupool_create(ctx->xch, poolid, schedid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+           "Could not create cpupool");
+        return ERROR_FAIL;
+    }
+
+    for (i = 0; i < n_cpus; i++)
+        if (cpumap[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error moving cpu to cpupool");
+                return ERROR_FAIL;
+            }
+        }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_mkdir(ctx->xsh, t, libxl__sprintf(&gc, "/local/pool/%d", *poolid));
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/uuid", *poolid),
+                 uuid_string);
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/name", *poolid),
+                 name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            return 0;
+    }
+}
+
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc, i;
+    xc_cpupoolinfo_t *info;
+    xs_transaction_t t;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL)
+        return ERROR_NOMEM;
+
+    rc = ERROR_INVAL;
+    if ((info->cpupool_id != poolid) || (info->n_dom))
+        goto out;
+
+    for (i = 0; i < info->cpumap_size; i++)
+        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error removing cpu from cpupool");
+                rc = ERROR_FAIL;
+                goto out;
+            }
+        }
+
+    rc = xc_cpupool_destroy(ctx->xch, poolid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_rm(ctx->xsh, XBT_NULL, libxl__sprintf(&gc, "/local/pool/%d", poolid));
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    rc = 0;
+
+out:
+    free(info);
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving cpu to cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error removing cpu from cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    char *dom_path;
+    char *vm_path;
+    char *poolname;
+    xs_transaction_t t;
+
+    dom_path = libxl__xs_get_dompath(&gc, domid);
+    if (!dom_path) {
+        return ERROR_FAIL;
+    }
+
+    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving domain to cpupool");
+        return ERROR_FAIL;
+    }
+
+    poolname = libxl__poolid_to_name(&gc, poolid);
+    vm_path = libxl__xs_read(&gc, XBT_NULL, libxl__sprintf(&gc, "%s/vm", dom_path));
+    for (; vm_path;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "%s/pool_name", vm_path), poolname);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    return 0;
+}
diff -r 3985fea87987 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.h	Wed Sep 15 09:19:02 2010 +0200
@@ -471,6 +471,15 @@ int libxl_device_net2_del(libxl_ctx *ctx
 int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
                           int wait);
 
+int libxl_get_freecpus(libxl_ctx *ctx, int *n_cpus, uint64_t **cpumap);
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         uint64_t *cpumap, int n_cpus, libxl_uuid *uuid,
+                         uint32_t *poolid);
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+
 /* common paths */
 const char *libxl_sbindir_path(void);
 const char *libxl_bindir_path(void);
diff -r 3985fea87987 tools/libxl/libxl.idl
--- a/tools/libxl/libxl.idl	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.idl	Wed Sep 15 09:19:02 2010 +0200
@@ -43,7 +43,11 @@ SHUTDOWN_* constant."""),
     ], destructor_fn=None)
 
 libxl_poolinfo = Struct("poolinfo", [
-    ("poolid", uint32)
+    ("poolid",      uint32),
+    ("sched_id",    uint32),
+    ("n_dom",       uint32),
+    ("cpumap_size", uint32),
+    ("cpumap",      libxl_cpumap)
     ], destructor_fn=None)
 
 libxl_vminfo = Struct("vminfo", [
diff -r 3985fea87987 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl_utils.c	Wed Sep 15 09:19:02 2010 +0200
@@ -31,6 +31,17 @@
 #include "libxl_utils.h"
 #include "libxl_internal.h"
 
+struct schedid_name {
+    char *name;
+    int id;
+};
+
+static struct schedid_name schedid_name[] = {
+    { "credit", XEN_SCHEDULER_CREDIT },
+    { "sedf", XEN_SCHEDULER_SEDF },
+    { "credit2", XEN_SCHEDULER_CREDIT2 },
+    { NULL, -1 }
+};
 
 unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus)
 {
@@ -137,6 +148,28 @@ int libxl_name_to_poolid(libxl_ctx *ctx,
     }
     free(poolinfo);
     return ret;
+}
+
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (strcmp(name, schedid_name[i].name) == 0)
+            return schedid_name[i].id;
+
+    return -1;
+}
+
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (schedid_name[i].id == schedid)
+            return schedid_name[i].name;
+
+    return "unknown";
 }
 
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid)
diff -r 3985fea87987 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl_utils.h	Wed Sep 15 09:19:02 2010 +0200
@@ -23,6 +23,8 @@ char *libxl_domid_to_name(libxl_ctx *ctx
 char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
 int libxl_name_to_poolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
 char *libxl_poolid_to_name(libxl_ctx *ctx, uint32_t poolid);
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name);
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid);
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
 int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
 int libxl_create_logfile(libxl_ctx *ctx, char *name, char **full_name);
diff -r 3985fea87987 tools/libxl/libxlu_cfg_l.c
--- a/tools/libxl/libxlu_cfg_l.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.c	Wed Sep 15 09:19:02 2010 +0200
@@ -54,6 +54,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -83,8 +84,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -159,15 +158,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -496,7 +487,7 @@ void xlu__cfg_yyset_column(int  column_n
 void xlu__cfg_yyset_column(int  column_no, yyscan_t yyscanner);
 
 
-#line 500 "libxlu_cfg_l.c"
+#line 491 "libxlu_cfg_l.c"
 
 #define INITIAL 0
 #define lexerr 1
@@ -632,12 +623,7 @@ static int input (yyscan_t yyscanner );
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -645,7 +631,7 @@ static int input (yyscan_t yyscanner );
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
+#define ECHO fwrite( yytext, yyleng, 1, yyout )
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -656,7 +642,7 @@ static int input (yyscan_t yyscanner );
 	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
 		{ \
 		int c = '*'; \
-		size_t n; \
+		int n; \
 		for ( n = 0; n < max_size && \
 			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
 			buf[n] = (char) c; \
@@ -744,7 +730,7 @@ YY_DECL
 #line 37 "libxlu_cfg_l.l"
 
 
-#line 748 "libxlu_cfg_l.c"
+#line 734 "libxlu_cfg_l.c"
 
     yylval = yylval_param;
 
@@ -944,7 +930,7 @@ YY_RULE_SETUP
 #line 82 "libxlu_cfg_l.l"
 YY_FATAL_ERROR( "flex scanner jammed" );
 	YY_BREAK
-#line 948 "libxlu_cfg_l.c"
+#line 934 "libxlu_cfg_l.c"
 case YY_STATE_EOF(INITIAL):
 case YY_STATE_EOF(lexerr):
 	yyterminate();
@@ -1687,8 +1673,8 @@ YY_BUFFER_STATE xlu__cfg_yy_scan_string 
 
 /** Setup the input buffer state to scan the given bytes. The next call to xlu__cfg_yylex() will
  * scan from a @e copy of @a bytes.
- * @param yybytes the byte buffer to scan
- * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
  * @param yyscanner The scanner object.
  * @return the newly allocated buffer state object.
  */
diff -r 3985fea87987 tools/libxl/libxlu_cfg_l.h
--- a/tools/libxl/libxlu_cfg_l.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.h	Wed Sep 15 09:19:02 2010 +0200
@@ -58,6 +58,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -87,8 +88,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -132,15 +131,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 #ifndef YY_TYPEDEF_YY_BUFFER_STATE
@@ -310,12 +301,7 @@ static int yy_flex_strlen (yyconst char 
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Number of entries by which start-condition stack grows. */
@@ -352,6 +338,6 @@ extern int xlu__cfg_yylex \
 
 #line 82 "libxlu_cfg_l.l"
 
-#line 356 "libxlu_cfg_l.h"
+#line 342 "libxlu_cfg_l.h"
 #undef xlu__cfg_yyIN_HEADER
 #endif /* xlu__cfg_yyHEADER_H */
diff -r 3985fea87987 tools/libxl/xl.h
--- a/tools/libxl/xl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl.h	Wed Sep 15 09:19:02 2010 +0200
@@ -79,6 +79,12 @@ int main_network2attach(int argc, char *
 int main_network2attach(int argc, char **argv);
 int main_network2list(int argc, char **argv);
 int main_network2detach(int argc, char **argv);
+int main_poolcreate(int argc, char **argv);
+int main_poollist(int argc, char **argv);
+int main_pooldestroy(int argc, char **argv);
+int main_poolcpuadd(int argc, char **argv);
+int main_poolcpuremove(int argc, char **argv);
+int main_poolmigrate(int argc, char **argv);
 
 void help(char *command);
 
diff -r 3985fea87987 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Wed Sep 15 09:19:02 2010 +0200
@@ -420,7 +420,7 @@ static void printf_info(int domid,
     printf("\t(ssidref %d)\n", c_info->ssidref);
     printf("\t(name %s)\n", c_info->name);
     printf("\t(uuid " LIBXL_UUID_FMT ")\n", LIBXL_UUID_BYTES(c_info->uuid));
-    printf("\t(cpupool %s (%d))\n", c_info->poolname, c_info->poolid);
+    printf("\t(cpupool %s)\n", c_info->poolname);
     if (c_info->xsdata)
         printf("\t(xsdata contains data)\n");
     else
@@ -3569,10 +3569,7 @@ static void output_xeninfo(void)
     printf("xen_minor              : %d\n", info->xen_version_minor);
     printf("xen_extra              : %s\n", info->xen_version_extra);
     printf("xen_caps               : %s\n", info->capabilities);
-    printf("xen_scheduler          : %s\n",
-        sched_id == XEN_SCHEDULER_SEDF ? "sedf" :
-        sched_id == XEN_SCHEDULER_CREDIT ? "credit" :
-        sched_id == XEN_SCHEDULER_CREDIT2 ? "credit2" : "unknown");
+    printf("xen_scheduler          : %s\n", libxl_schedid_to_name(&ctx, sched_id));
     printf("xen_pagesize           : %lu\n", info->pagesize);
     printf("platform_params        : virt_start=0x%lx\n", info->virt_start);
     printf("xen_changeset          : %s\n", info->changeset);
@@ -3603,6 +3600,8 @@ static void output_physinfo(void)
     libxl_physinfo info;
     const libxl_version_info *vinfo;
     unsigned int i;
+    uint64_t *cpumap;
+    int n_cpus, n = 0;
 
     if (libxl_get_physinfo(&ctx, &info) != 0) {
         fprintf(stderr, "libxl_physinfo failed.\n");
@@ -3629,6 +3628,13 @@ static void output_physinfo(void)
         printf("total_memory           : %"PRIu64"\n", info.total_pages / i);
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
+    if (!libxl_get_freecpus(&ctx, &n_cpus, &cpumap)) {
+        for (i = 0; i < n_cpus; i++)
+            if (cpumap[i / 64] & ((uint64_t)1 << (i % 64)))
+                n++;
+        printf("free_cpus              : %d\n", n);
+        free(cpumap);
+    }
 
     return;
 }
@@ -5044,3 +5050,444 @@ int main_tmem_freeable(int argc, char **
     printf("%d\n", mb);
     return 0;
 }
+
+int main_poolcreate(int argc, char **argv)
+{
+    char *filename = NULL;
+    char *p, extra_config[1024];
+    int dryrun = 0;
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"defconfig", 1, 0, 'f'},
+        {"dryrun", 0, 0, 'n'},
+        {0, 0, 0, 0}
+    };
+    int ret;
+    void *config_data = 0;
+    int config_len = 0;
+    XLU_Config *config;
+    const char *buf;
+    char *name, *sched;
+    uint32_t poolid;
+    int schedid = -1;
+    XLU_ConfigList *cpus;
+    int max_cpus, n_cpus, i, n;
+    uint64_t *freemap = NULL;
+    uint64_t *cpumap = NULL;
+    libxl_uuid uuid;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hnf:", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'f':
+            filename = optarg;
+            break;
+        case 'h':
+            help("pool-create");
+            return 0;
+        case 'n':
+            dryrun = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    memset(extra_config, 0, sizeof(extra_config));
+    while (optind < argc) {
+        if ((p = strchr(argv[optind], '='))) {
+            if (strlen(extra_config) + 1 < sizeof(extra_config)) {
+                if (strlen(extra_config))
+                    strcat(extra_config, "\n");
+                strcat(extra_config, argv[optind]);
+            }
+        } else if (!filename) {
+            filename = argv[optind];
+        } else {
+            help("pool-create");
+            return -ERROR_FAIL;
+        }
+        optind++;
+    }
+
+    if (!filename) {
+        help("pool-create");
+        return -ERROR_FAIL;
+    }
+
+    if (libxl_read_file_contents(&ctx, filename, &config_data, &config_len)) {
+        fprintf(stderr, "Failed to read config file: %s: %s\n",
+                filename, strerror(errno));
+        return -ERROR_FAIL;
+    }
+    if (strlen(extra_config)) {
+        if (config_len > INT_MAX - (strlen(extra_config) + 2)) {
+            fprintf(stderr, "Failed to attach extra configuration\n");
+            return -ERROR_FAIL;
+        }
+        config_data = realloc(config_data,
+                              config_len + strlen(extra_config) + 2);
+        if (!config_data) {
+            fprintf(stderr, "Failed to realloc config_data\n");
+            return -ERROR_FAIL;
+        }
+        strcat(config_data, "\n");
+        strcat(config_data, extra_config);
+        strcat(config_data, "\n");
+        config_len += (strlen(extra_config) + 2);
+    }
+
+    config = xlu_cfg_init(stderr, filename);
+    if (!config) {
+        fprintf(stderr, "Failed to allocate for configuration\n");
+        return -ERROR_FAIL;
+    }
+
+    ret = xlu_cfg_readdata(config, config_data, config_len);
+    if (ret) {
+        fprintf(stderr, "Failed to parse config file: %s\n", strerror(ret));
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "name", &buf))
+        name = strdup(buf);
+    else
+        name = basename(filename);
+    if (!libxl_name_to_poolid(&ctx, name, &poolid)) {
+        fprintf(stderr, "Pool name \"%s\" already exists\n", name);
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "sched", &buf)) {
+        if ((schedid = libxl_name_to_schedid(&ctx, buf)) < 0) {
+            fprintf(stderr, "Unknown scheduler\n");
+            return -ERROR_FAIL;
+        }
+    } else {
+        if ((schedid = libxl_get_sched_id(&ctx)) < 0) {
+            fprintf(stderr, "get_sched_id sysctl failed.\n");
+            return -ERROR_FAIL;
+        }
+    }
+    sched = libxl_schedid_to_name(&ctx, schedid);
+
+    if (libxl_get_freecpus(&ctx, &max_cpus, &freemap)) {
+        fprintf(stderr, "libxl_get_freecpus failed\n");
+        return -ERROR_FAIL;
+    }
+    cpumap = calloc((max_cpus + 63) / 64, sizeof (*cpumap));
+    if (!cpumap) {
+        fprintf(stderr, "Failed to allocate cpumap\n");
+        return -ERROR_FAIL;
+    }
+    if (!xlu_cfg_get_list(config, "cpus", &cpus, 0)) {
+        n_cpus = 0;
+        while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
+            i = atoi(buf);
+            if ((i < 0) || (i >= max_cpus) ||
+                !(freemap[i / 64] & (1L << (i % 64)))) {
+                fprintf(stderr, "cpu %d illegal or not free\n", i);
+                return -ERROR_FAIL;
+            }
+            cpumap[i / 64] |= 1L << (i % 64);
+            n_cpus++;
+        }
+    } else {
+        n_cpus = 1;
+        n = 0;
+        for (i = 0; i < max_cpus; i++)
+            if (freemap[i / 64] & (1L << (i % 64))) {
+                n++;
+                cpumap[i / 64] = 1L << (i % 64);
+                break;
+            }
+        if (n != n_cpus) {
+            fprintf(stderr, "no free cpu found\n");
+            return -ERROR_FAIL;
+        }
+    }
+
+    libxl_uuid_generate(&uuid);
+
+    printf("Using config file \"%s\"\n", filename);
+    printf("pool name:      %s\n", name);
+    printf("scheduler:      %s\n", sched);
+    printf("number of cpus: %d\n", n_cpus);
+
+    if (dryrun)
+        return 0;
+
+    poolid = 0;
+    if (libxl_create_cpupool(&ctx, name, schedid, cpumap, max_cpus, &uuid,
+                             &poolid)) {
+        fprintf(stderr, "error on creating cpupool\n");
+        return -ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int main_poollist(int argc, char **argv)
+{
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"long", 0, 0, 'l'},
+        {"cpus", 0, 0, 'c'},
+        {0, 0, 0, 0}
+    };
+    int opt_long = 0;
+    int opt_cpus = 0;
+    char *pool = NULL;
+    libxl_poolinfo *poolinfo;
+    int n_pools, p, c, n;
+    uint32_t poolid;
+    char *name;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hlc", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'h':
+            help("pool-list");
+            return 0;
+        case 'l':
+            opt_long = 1;
+            break;
+        case 'c':
+            opt_cpus = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    if ((optind + 1) < argc) {
+        help("pool-list");
+        return -ERROR_FAIL;
+    }
+    if (optind < argc) {
+        pool = argv[optind];
+        if (libxl_name_to_poolid(&ctx, pool, &poolid)) {
+            fprintf(stderr, "Pool \'%s\' does not exist\n", pool);
+            return -ERROR_FAIL;
+        }
+    }
+
+    poolinfo = libxl_list_pool(&ctx, &n_pools);
+    if (!poolinfo) {
+        fprintf(stderr, "error getting cpupool info\n");
+        return -ERROR_NOMEM;
+    }
+
+    if (!opt_long) {
+        printf("%-19s", "Name");
+        if (opt_cpus)
+            printf("CPU list\n");
+        else
+            printf("CPUs   Sched     Active   Domain count\n");
+    }
+
+    for (p = 0; p < n_pools; p++) {
+        if (pool && (poolinfo[p].poolid != poolid))
+            continue;
+        name = libxl_poolid_to_name(&ctx, poolinfo[p].poolid);
+        if (!name) {
+            fprintf(stderr, "error getting cpupool info\n");
+            return -ERROR_NOMEM;
+        }
+        if (opt_long) {
+            return -ERROR_NI;
+        } else {
+            printf("%-19s", name);
+            n = 0;
+            for (c = 0; c < poolinfo[p].cpumap_size; c++)
+                if (poolinfo[p].cpumap[c / 64] & (1L << (c % 64))) {
+                    if (n && opt_cpus) printf(",");
+                    if (opt_cpus) printf("%d", c);
+                    n++;
+                }
+            if (!opt_cpus) {
+                printf("%3d %9s       y       %4d", n,
+                       libxl_schedid_to_name(&ctx, poolinfo[p].sched_id),
+                       poolinfo[p].n_dom);
+            }
+            printf("\n");
+        }
+    }
+
+    return 0;
+}
+
+int main_pooldestroy(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-destroy");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-destroy");
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_destroy_cpupool(&ctx, poolid);
+}
+
+int main_poolcpuadd(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-add");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuadd(&ctx, poolid, cpu);
+}
+
+int main_poolcpuremove(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-remove");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuremove(&ctx, poolid, cpu);
+}
+
+int main_poolmigrate(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    char *dom;
+    uint32_t domid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-migrate");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    dom = argv[optind++];
+    if (!dom) {
+       fprintf(stderr, "no domain specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    if (domain_qualifier_to_domid(dom, &domid, NULL) ||
+        !libxl_domid_to_name(&ctx, domid)) {
+        fprintf(stderr, "unknown domain \'%s\'\n", dom);
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_movedomain(&ctx, poolid, domid);
+}
diff -r 3985fea87987 tools/libxl/xl_cmdtable.c
--- a/tools/libxl/xl_cmdtable.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl_cmdtable.c	Wed Sep 15 09:19:02 2010 +0200
@@ -338,6 +338,41 @@ struct cmd_spec cmd_table[] = {
       "destroy a domain's version 2 virtual network device",
       "<Domain> <DevId>",
     },
+    { "pool-create",
+      &main_poolcreate,
+      "Create a CPU pool based an ConfigFile",
+      "[options] <ConfigFile> [vars]",
+      "-h, --help                   Print this help.\n"
+      "-f=FILE, --defconfig=FILE    Use the given configuration file.\n"
+      "-n, --dryrun                 Dry run - prints the resulting configuration."
+    },
+    { "pool-list",
+      &main_poollist,
+      "List CPU pools on host",
+      "[-l|--long] [-c|--cpus] [<CPU Pool>]",
+      "-l, --long                     Output all CPU pool details.\n"
+      "-c, --cpus                     Output list of CPUs used by a pool"
+    },
+    { "pool-destroy",
+      &main_pooldestroy,
+      "Deactivates a CPU pool",
+      "<CPU Pool>",
+    },
+    { "pool-cpu-add",
+      &main_poolcpuadd,
+      "Adds a CPU to a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-cpu-remove",
+      &main_poolcpuremove,
+      "Removes a CPU from a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-migrate",
+      &main_poolmigrate,
+      "Moves a domain into a CPU pool",
+      "<Domain> <CPU Pool>",
+    },
 };
 
 int cmdtable_len = sizeof(cmd_table)/sizeof(struct cmd_spec);
diff -r 3985fea87987 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Sep 15 09:19:02 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
   
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_cpu_id + 1;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
 
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
         return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_cpu_id + 1;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1L << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                       &first_pool, &max_pools) )
         return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = -ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              (physinfo.max_cpu_id + 8) / 8);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  7:26 [Patch][resend] implementation of cpupool support in xl Juergen Gross
@ 2010-09-15  8:29 ` Ian Campbell
  2010-09-15  8:45   ` Juergen Gross
  2010-09-16  5:58 ` Vasiliy G Tolstov
  2010-09-16 10:45 ` Ian Campbell
  2 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-15  8:29 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
> diff -r 3985fea87987 tools/libxl/libxl.idl
> --- a/tools/libxl/libxl.idl     Fri Sep 10 19:06:33 2010 +0100
> +++ b/tools/libxl/libxl.idl     Wed Sep 15 09:19:02 2010 +0200
> @@ -43,7 +43,11 @@ SHUTDOWN_* constant."""),
>      ], destructor_fn=None)
>  
>  libxl_poolinfo = Struct("poolinfo", [
> -    ("poolid", uint32)
> +    ("poolid",      uint32),
> +    ("sched_id",    uint32),
> +    ("n_dom",       uint32),
> +    ("cpumap_size", uint32),
> +    ("cpumap",      libxl_cpumap)
>      ], destructor_fn=None)
>  
>  libxl_vminfo = Struct("vminfo", [ 

Does the addition of the cpumap field here mean that we now need to
generate a destructor function (by removing destructor_fn=None) and call
it e.g. from main_pool*?

Would it make sense to turn libxl_cpumap into a struct containing both
the size and the data pointer?

> diff -r 3985fea87987 tools/libxl/libxl.h
> --- a/tools/libxl/libxl.h	Fri Sep 10 19:06:33 2010 +0100
> +++ b/tools/libxl/libxl.h	Wed Sep 15 09:19:02 2010 +0200
> @@ -471,6 +471,15 @@ int libxl_device_net2_del(libxl_ctx *ctx
>  int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
>                            int wait);
>  
> +int libxl_get_freecpus(libxl_ctx *ctx, int *n_cpus, uint64_t **cpumap);
> +int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
> +                         uint64_t *cpumap, int n_cpus, libxl_uuid *uuid,
> +                         uint32_t *poolid);

Should these cpumap parameters be libxl_cpumap* or are they a different sort of cpumap?

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  8:29 ` Ian Campbell
@ 2010-09-15  8:45   ` Juergen Gross
  2010-09-15  9:16     ` Ian Campbell
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-15  8:45 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel

On 09/15/10 10:29, Ian Campbell wrote:
> On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
>> diff -r 3985fea87987 tools/libxl/libxl.idl
>> --- a/tools/libxl/libxl.idl     Fri Sep 10 19:06:33 2010 +0100
>> +++ b/tools/libxl/libxl.idl     Wed Sep 15 09:19:02 2010 +0200
>> @@ -43,7 +43,11 @@ SHUTDOWN_* constant."""),
>>       ], destructor_fn=None)
>>
>>   libxl_poolinfo = Struct("poolinfo", [
>> -    ("poolid", uint32)
>> +    ("poolid",      uint32),
>> +    ("sched_id",    uint32),
>> +    ("n_dom",       uint32),
>> +    ("cpumap_size", uint32),
>> +    ("cpumap",      libxl_cpumap)
>>       ], destructor_fn=None)
>>
>>   libxl_vminfo = Struct("vminfo", [
>
> Does the addition of the cpumap field here mean that we now need to
> generate a destructor function (by removing destructor_fn=None) and call
> it e.g. from main_pool*?

I took care of this by allocating the space for the cpumap(s) together with
the poolinfo structure(s).
If you don't like this, a destructor would be the correct solution, I think.

> Would it make sense to turn libxl_cpumap into a struct containing both
> the size and the data pointer?

IMO this would make sense. You ALWAYS need the size of a cpumap to handle it
correctly.

>
>> diff -r 3985fea87987 tools/libxl/libxl.h
>> --- a/tools/libxl/libxl.h	Fri Sep 10 19:06:33 2010 +0100
>> +++ b/tools/libxl/libxl.h	Wed Sep 15 09:19:02 2010 +0200
>> @@ -471,6 +471,15 @@ int libxl_device_net2_del(libxl_ctx *ctx
>>   int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
>>                             int wait);
>>
>> +int libxl_get_freecpus(libxl_ctx *ctx, int *n_cpus, uint64_t **cpumap);
>> +int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
>> +                         uint64_t *cpumap, int n_cpus, libxl_uuid *uuid,
>> +                         uint32_t *poolid);
>
> Should these cpumap parameters be libxl_cpumap* or are they a different sort of cpumap?

You are right, these should be libxl_cpumap.
I'll update the patch. It would be nice to know whether you are planning to
change libxl_cpumap to include the size or not.


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  8:45   ` Juergen Gross
@ 2010-09-15  9:16     ` Ian Campbell
  2010-09-15  9:23       ` Juergen Gross
  0 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-15  9:16 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 09:45 +0100, Juergen Gross wrote:
> On 09/15/10 10:29, Ian Campbell wrote:
> > On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
> >> diff -r 3985fea87987 tools/libxl/libxl.idl
> >> --- a/tools/libxl/libxl.idl     Fri Sep 10 19:06:33 2010 +0100
> >> +++ b/tools/libxl/libxl.idl     Wed Sep 15 09:19:02 2010 +0200
> >> @@ -43,7 +43,11 @@ SHUTDOWN_* constant."""),
> >>       ], destructor_fn=None)
> >>
> >>   libxl_poolinfo = Struct("poolinfo", [
> >> -    ("poolid", uint32)
> >> +    ("poolid",      uint32),
> >> +    ("sched_id",    uint32),
> >> +    ("n_dom",       uint32),
> >> +    ("cpumap_size", uint32),
> >> +    ("cpumap",      libxl_cpumap)
> >>       ], destructor_fn=None)
> >>
> >>   libxl_vminfo = Struct("vminfo", [
> >
> > Does the addition of the cpumap field here mean that we now need to
> > generate a destructor function (by removing destructor_fn=None) and call
> > it e.g. from main_pool*?
> 
> I took care of this by allocating the space for the cpumap(s) together with
> the poolinfo structure(s).
> If you don't like this, a destructor would be the correct solution, I think.

Personally I would prefer using the destructor style for consistency
with other libxl types.

> I'll update the patch. It would be nice to know whether you are planning to
> change libxl_cpumap to include the size or not.

I wasn't immediately planning on it but I can if you don't want to do it
as part of this patchset.

Thanks,
Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  9:16     ` Ian Campbell
@ 2010-09-15  9:23       ` Juergen Gross
  2010-09-15  9:37         ` Ian Campbell
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-15  9:23 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel

On 09/15/10 11:16, Ian Campbell wrote:
> On Wed, 2010-09-15 at 09:45 +0100, Juergen Gross wrote:
>> On 09/15/10 10:29, Ian Campbell wrote:
>>> On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
>>>> diff -r 3985fea87987 tools/libxl/libxl.idl
>>>> --- a/tools/libxl/libxl.idl     Fri Sep 10 19:06:33 2010 +0100
>>>> +++ b/tools/libxl/libxl.idl     Wed Sep 15 09:19:02 2010 +0200
>>>> @@ -43,7 +43,11 @@ SHUTDOWN_* constant."""),
>>>>        ], destructor_fn=None)
>>>>
>>>>    libxl_poolinfo = Struct("poolinfo", [
>>>> -    ("poolid", uint32)
>>>> +    ("poolid",      uint32),
>>>> +    ("sched_id",    uint32),
>>>> +    ("n_dom",       uint32),
>>>> +    ("cpumap_size", uint32),
>>>> +    ("cpumap",      libxl_cpumap)
>>>>        ], destructor_fn=None)
>>>>
>>>>    libxl_vminfo = Struct("vminfo", [
>>>
>>> Does the addition of the cpumap field here mean that we now need to
>>> generate a destructor function (by removing destructor_fn=None) and call
>>> it e.g. from main_pool*?
>>
>> I took care of this by allocating the space for the cpumap(s) together with
>> the poolinfo structure(s).
>> If you don't like this, a destructor would be the correct solution, I think.
>
> Personally I would prefer using the destructor style for consistency
> with other libxl types.

Okay, I'll change it accordingly.

>
>> I'll update the patch. It would be nice to know whether you are planning to
>> change libxl_cpumap to include the size or not.
>
> I wasn't immediately planning on it but I can if you don't want to do it
> as part of this patchset.

I think it shouldn't be included in this patch. :-)
And I'm not sure I'll get the generating of the bindings correctly.
So yes, please do it!


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  9:23       ` Juergen Gross
@ 2010-09-15  9:37         ` Ian Campbell
  2010-09-15 10:22           ` Ian Campbell
  0 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-15  9:37 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 10:23 +0100, Juergen Gross wrote:
> 
> And I'm not sure I'll get the generating of the bindings correctly.
> So yes, please do it! 

Will do.

From looking at your patch it seems that the intention is that the size
is the number of bytes in the map, rather than the number of CPUs which
can be represented or the number of uint64_t's, is that right and/or
what you would like to use?

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  9:37         ` Ian Campbell
@ 2010-09-15 10:22           ` Ian Campbell
  2010-09-15 11:28             ` Juergen Gross
  0 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-15 10:22 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 10:37 +0100, Ian Campbell wrote:
> On Wed, 2010-09-15 at 10:23 +0100, Juergen Gross wrote:
> > 
> > And I'm not sure I'll get the generating of the bindings correctly.
> > So yes, please do it! 
> 
> Will do.
> 
> From looking at your patch it seems that the intention is that the size
> is the number of bytes in the map, rather than the number of CPUs which
> can be represented or the number of uint64_t's, is that right and/or
> what you would like to use?

Seems like libxc counts bytes but libxl (with your patch) counts 64 bit
words.

There doesn't seem to be any users of the size currently so I went with
bytes in the below (compile tested + examined generated code only) but
since your patch adds the first actual user feel free to change it if
that makes the libxl interface more suitable for your needs or whatever.

Ian.

diff -r 098790dd9327 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Thu Sep 09 17:59:33 2010 +0100
+++ b/tools/libxl/libxl.c	Wed Sep 15 11:19:35 2010 +0100
@@ -2924,8 +2924,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
 
     num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
-        if (!ptr->cpumap) {
+        ptr->cpumap.size = num_cpuwords * sizeof(*ptr->cpumap.map);
+        ptr->cpumap.map = malloc(ptr->cpumap.size);
+        if (!ptr->cpumap.map) {
             return NULL;
         }
         if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) {
@@ -2933,7 +2934,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
             return NULL;
         }
         if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-            ptr->cpumap, ((*nrcpus) + 7) / 8) == -1) {
+            ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
diff -r 098790dd9327 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Thu Sep 09 17:59:33 2010 +0100
+++ b/tools/libxl/libxl.h	Wed Sep 15 11:19:35 2010 +0100
@@ -138,8 +138,6 @@ typedef char **libxl_string_list;
 typedef char **libxl_string_list;
 
 typedef char **libxl_key_value_list;
-
-typedef uint64_t *libxl_cpumap;
 
 typedef uint32_t libxl_hwcap[8];
 
diff -r 098790dd9327 tools/libxl/libxl.idl
--- a/tools/libxl/libxl.idl	Thu Sep 09 17:59:33 2010 +0100
+++ b/tools/libxl/libxl.idl	Wed Sep 15 11:19:35 2010 +0100
@@ -14,8 +14,6 @@ libxl_nic_type = Builtin("nic_type")
 
 libxl_string_list = Builtin("string_list", destructor_fn="libxl_string_list_destroy", passby=PASS_BY_REFERENCE)
 libxl_key_value_list = Builtin("key_value_list", destructor_fn="libxl_key_value_list_destroy", passby=PASS_BY_REFERENCE)
-
-libxl_cpumap = Builtin("cpumap", destructor_fn="free")
 
 libxl_hwcap = Builtin("hwcap")
 
@@ -42,6 +40,11 @@ SHUTDOWN_* constant."""),
     ("vcpu_online", uint32),
     ], destructor_fn=None)
 
+libxl_cpumap = Struct("cpumap", [
+    ("size", uint32),
+    ("map",  Reference(uint64)),
+    ])
+                      
 libxl_poolinfo = Struct("poolinfo", [
     ("poolid", uint32)
     ], destructor_fn=None)
diff -r 098790dd9327 tools/libxl/libxltypes.py
--- a/tools/libxl/libxltypes.py	Thu Sep 09 17:59:33 2010 +0100
+++ b/tools/libxl/libxltypes.py	Wed Sep 15 11:19:35 2010 +0100
@@ -119,7 +119,10 @@ class Reference(Type):
         kwargs.setdefault('passby', PASS_BY_VALUE)
         
         kwargs.setdefault('namespace', ty.namespace)
-        typename = ty.typename[len(kwargs['namespace']):]
+
+        typename = ty.typename
+        if ty.namespace:
+            typename = typename[len(kwargs['namespace']):]
         Type.__init__(self, typename + " *", **kwargs)
 
 #
diff -r 098790dd9327 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Thu Sep 09 17:59:33 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Wed Sep 15 11:19:35 2010 +0100
@@ -3289,7 +3289,7 @@ static void print_vcpuinfo(uint32_t tdom
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
     pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
-    for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+    for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
@@ -3304,7 +3304,7 @@ static void print_vcpuinfo(uint32_t tdom
     if (!nr_cpus) {
         printf("any cpu\n");
     } else {
-        for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+        for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
             pcpumap = *cpumap;
             for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
                 ;

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15 10:22           ` Ian Campbell
@ 2010-09-15 11:28             ` Juergen Gross
  2010-09-15 15:35               ` Ian Campbell
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-15 11:28 UTC (permalink / raw)
  To: Ian Campbell, xen-devel

[-- Attachment #1: Type: text/plain, Size: 1602 bytes --]

On 09/15/10 12:22, Ian Campbell wrote:
> On Wed, 2010-09-15 at 10:37 +0100, Ian Campbell wrote:
>> On Wed, 2010-09-15 at 10:23 +0100, Juergen Gross wrote:
>>>
>>> And I'm not sure I'll get the generating of the bindings correctly.
>>> So yes, please do it!
>>
>> Will do.
>>
>> From looking at your patch it seems that the intention is that the size
>> is the number of bytes in the map, rather than the number of CPUs which
>> can be represented or the number of uint64_t's, is that right and/or
>> what you would like to use?
>
> Seems like libxc counts bytes but libxl (with your patch) counts 64 bit
> words.
>
> There doesn't seem to be any users of the size currently so I went with
> bytes in the below (compile tested + examined generated code only) but
> since your patch adds the first actual user feel free to change it if
> that makes the libxl interface more suitable for your needs or whatever.

Thanks, updated patch is attached.
I didn't add the destructor, because this would have made things much more
complicated as libxl_list_pool returns an array of libxl_poolinfo elements.
Doing only one free() at caller side is much easier than cycling through
the elements and free-ing all extra allocated cpumaps.


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

[-- Attachment #2: xl-pools.patch --]
[-- Type: text/x-patch, Size: 46198 bytes --]

Signed-off-by: juergen.gross@ts.fujitsu.com

diff -r 3985fea87987 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxc/xc_cpupool.c	Wed Sep 15 13:14:35 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int cpumap_size = 0;
+    xc_physinfo_t physinfo;
+
+    if ( cpumap_size )
+        return cpumap_size;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+    return cpumap_size;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +78,56 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t first_poolid,
-                       uint32_t n_max, 
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    cpumap_size = (local_size + 7) / 8;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@ int xc_cpupool_movedomain(xc_interface *
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
     DECLARE_SYSCTL;
+
+    local = malloc(cpusize);
+    if (local == NULL)
+        return -ENOMEM;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    free(local);
 
     if (err < 0)
         return err;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
     return 0;
 }
diff -r 3985fea87987 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxc/xenctrl.h	Wed Sep 15 13:14:35 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
  */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
  * return 0 on success, -1 on failure
  */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r 3985fea87987 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.c	Wed Sep 15 13:14:35 2010 +0200
@@ -594,26 +594,46 @@ libxl_poolinfo * libxl_list_pool(libxl_c
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
+    int i, m;
+    xc_cpupoolinfo_t *info;
+    int size;
+    uint32_t poolid;
+    libxl_physinfo physinfo;
+    int mapsize;
+    uint64_t *cpumap;
 
-    ptr = calloc(size, sizeof(libxl_poolinfo));
+    if (libxl_get_physinfo(ctx, &physinfo) != 0) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting phys info");
+        return NULL;
+    }
+    mapsize = (physinfo.max_cpu_id + 64) / 64;
+    size = physinfo.max_cpu_id + 32;
+
+    ptr = calloc(size, sizeof(libxl_poolinfo) + mapsize * sizeof(*cpumap));
     if (!ptr) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
         return NULL;
     }
+    cpumap = (uint64_t *)(ptr + size);
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
+    poolid = 0;
+    for (i = 0; i < size; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        ptr[i].poolid = info->cpupool_id;
+        ptr[i].sched_id = info->sched_id;
+        ptr[i].n_dom = info->n_dom;
+        ptr[i].cpumap.size = mapsize * sizeof(*cpumap);
+        ptr[i].cpumap.map = cpumap;
+        for (m = 0; m < mapsize; m++)
+            cpumap[m] = info->cpumap[m];
+        cpumap += mapsize;
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
 
@@ -2927,8 +2947,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
 
     num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
-        if (!ptr->cpumap) {
+        ptr->cpumap.size = num_cpuwords * sizeof(*ptr->cpumap.map);
+        ptr->cpumap.map = malloc(ptr->cpumap.size);
+        if (!ptr->cpumap.map) {
             return NULL;
         }
         if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) {
@@ -2936,7 +2957,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
             return NULL;
         }
         if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-            ptr->cpumap, ((*nrcpus) + 7) / 8) == -1) {
+            ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -3340,3 +3361,182 @@ void libxl_file_reference_destroy(libxl_
     libxl__file_reference_unmap(f);
     free(f->path);
 }
+
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap)
+{
+    libxl_physinfo info;
+    int mapsize;
+
+    if (libxl_get_physinfo(ctx, &info) != 0)
+        return ERROR_FAIL;
+
+    mapsize = (info.max_cpu_id + 64) / 64;
+    cpumap->map = calloc(mapsize, sizeof(*cpumap->map));
+    if (!cpumap->map)
+        return ERROR_NOMEM;
+    cpumap->size = mapsize * sizeof(*cpumap->map);
+
+    if (xc_cpupool_freeinfo(ctx->xch, cpumap->map, cpumap->size)) {
+        free(cpumap->map);
+        cpumap->map = NULL;
+        return ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    int i;
+    xs_transaction_t t;
+    char *uuid_string;
+
+    uuid_string = libxl__uuid2string(&gc, *uuid);
+    if (!uuid_string)
+        return ERROR_NOMEM;
+
+    rc = xc_cpupool_create(ctx->xch, poolid, schedid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+           "Could not create cpupool");
+        return ERROR_FAIL;
+    }
+
+    for (i = 0; i < cpumap.size * 8; i++)
+        if (cpumap.map[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error moving cpu to cpupool");
+                return ERROR_FAIL;
+            }
+        }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_mkdir(ctx->xsh, t, libxl__sprintf(&gc, "/local/pool/%d", *poolid));
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/uuid", *poolid),
+                 uuid_string);
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/name", *poolid),
+                 name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            return 0;
+    }
+}
+
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc, i;
+    xc_cpupoolinfo_t *info;
+    xs_transaction_t t;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL)
+        return ERROR_NOMEM;
+
+    rc = ERROR_INVAL;
+    if ((info->cpupool_id != poolid) || (info->n_dom))
+        goto out;
+
+    for (i = 0; i < info->cpumap_size; i++)
+        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error removing cpu from cpupool");
+                rc = ERROR_FAIL;
+                goto out;
+            }
+        }
+
+    rc = xc_cpupool_destroy(ctx->xch, poolid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_rm(ctx->xsh, t, libxl__sprintf(&gc, "/local/pool/%d", poolid));
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    rc = 0;
+
+out:
+    free(info);
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving cpu to cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error removing cpu from cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    char *dom_path;
+    char *vm_path;
+    char *poolname;
+    xs_transaction_t t;
+
+    dom_path = libxl__xs_get_dompath(&gc, domid);
+    if (!dom_path) {
+        return ERROR_FAIL;
+    }
+
+    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving domain to cpupool");
+        return ERROR_FAIL;
+    }
+
+    poolname = libxl__poolid_to_name(&gc, poolid);
+    vm_path = libxl__xs_read(&gc, XBT_NULL, libxl__sprintf(&gc, "%s/vm", dom_path));
+    for (; vm_path;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "%s/pool_name", vm_path), poolname);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    return 0;
+}
diff -r 3985fea87987 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.h	Wed Sep 15 13:14:35 2010 +0200
@@ -138,8 +138,6 @@ typedef char **libxl_string_list;
 typedef char **libxl_string_list;
 
 typedef char **libxl_key_value_list;
-
-typedef uint64_t *libxl_cpumap;
 
 typedef uint32_t libxl_hwcap[8];
 
@@ -471,6 +469,15 @@ int libxl_device_net2_del(libxl_ctx *ctx
 int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
                           int wait);
 
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap);
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid);
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+
 /* common paths */
 const char *libxl_sbindir_path(void);
 const char *libxl_bindir_path(void);
diff -r 3985fea87987 tools/libxl/libxl.idl
--- a/tools/libxl/libxl.idl	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl.idl	Wed Sep 15 13:14:35 2010 +0200
@@ -14,8 +14,6 @@ libxl_nic_type = Builtin("nic_type")
 
 libxl_string_list = Builtin("string_list", destructor_fn="libxl_string_list_destroy", passby=PASS_BY_REFERENCE)
 libxl_key_value_list = Builtin("key_value_list", destructor_fn="libxl_key_value_list_destroy", passby=PASS_BY_REFERENCE)
-
-libxl_cpumap = Builtin("cpumap", destructor_fn="free")
 
 libxl_hwcap = Builtin("hwcap")
 
@@ -42,8 +40,16 @@ SHUTDOWN_* constant."""),
     ("vcpu_online", uint32),
     ], destructor_fn=None)
 
+libxl_cpumap = Struct("cpumap", [
+    ("size", uint32),
+    ("map",  Reference(uint64)),
+    ])
+                      
 libxl_poolinfo = Struct("poolinfo", [
-    ("poolid", uint32)
+    ("poolid",      uint32),
+    ("sched_id",    uint32),
+    ("n_dom",       uint32),
+    ("cpumap",      libxl_cpumap)
     ], destructor_fn=None)
 
 libxl_vminfo = Struct("vminfo", [
diff -r 3985fea87987 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl_utils.c	Wed Sep 15 13:14:35 2010 +0200
@@ -31,6 +31,17 @@
 #include "libxl_utils.h"
 #include "libxl_internal.h"
 
+struct schedid_name {
+    char *name;
+    int id;
+};
+
+static struct schedid_name schedid_name[] = {
+    { "credit", XEN_SCHEDULER_CREDIT },
+    { "sedf", XEN_SCHEDULER_SEDF },
+    { "credit2", XEN_SCHEDULER_CREDIT2 },
+    { NULL, -1 }
+};
 
 unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus)
 {
@@ -137,6 +148,28 @@ int libxl_name_to_poolid(libxl_ctx *ctx,
     }
     free(poolinfo);
     return ret;
+}
+
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (strcmp(name, schedid_name[i].name) == 0)
+            return schedid_name[i].id;
+
+    return -1;
+}
+
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (schedid_name[i].id == schedid)
+            return schedid_name[i].name;
+
+    return "unknown";
 }
 
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid)
diff -r 3985fea87987 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxl_utils.h	Wed Sep 15 13:14:35 2010 +0200
@@ -23,6 +23,8 @@ char *libxl_domid_to_name(libxl_ctx *ctx
 char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
 int libxl_name_to_poolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
 char *libxl_poolid_to_name(libxl_ctx *ctx, uint32_t poolid);
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name);
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid);
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
 int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
 int libxl_create_logfile(libxl_ctx *ctx, char *name, char **full_name);
diff -r 3985fea87987 tools/libxl/libxltypes.py
--- a/tools/libxl/libxltypes.py	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxltypes.py	Wed Sep 15 13:14:35 2010 +0200
@@ -119,7 +119,10 @@ class Reference(Type):
         kwargs.setdefault('passby', PASS_BY_VALUE)
         
         kwargs.setdefault('namespace', ty.namespace)
-        typename = ty.typename[len(kwargs['namespace']):]
+
+        typename = ty.typename
+        if ty.namespace:
+            typename = typename[len(kwargs['namespace']):]
         Type.__init__(self, typename + " *", **kwargs)
 
 #
diff -r 3985fea87987 tools/libxl/libxlu_cfg_l.c
--- a/tools/libxl/libxlu_cfg_l.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.c	Wed Sep 15 13:14:35 2010 +0200
@@ -54,6 +54,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -83,8 +84,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -159,15 +158,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -496,7 +487,7 @@ void xlu__cfg_yyset_column(int  column_n
 void xlu__cfg_yyset_column(int  column_no, yyscan_t yyscanner);
 
 
-#line 500 "libxlu_cfg_l.c"
+#line 491 "libxlu_cfg_l.c"
 
 #define INITIAL 0
 #define lexerr 1
@@ -632,12 +623,7 @@ static int input (yyscan_t yyscanner );
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -645,7 +631,7 @@ static int input (yyscan_t yyscanner );
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
+#define ECHO fwrite( yytext, yyleng, 1, yyout )
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -656,7 +642,7 @@ static int input (yyscan_t yyscanner );
 	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
 		{ \
 		int c = '*'; \
-		size_t n; \
+		int n; \
 		for ( n = 0; n < max_size && \
 			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
 			buf[n] = (char) c; \
@@ -744,7 +730,7 @@ YY_DECL
 #line 37 "libxlu_cfg_l.l"
 
 
-#line 748 "libxlu_cfg_l.c"
+#line 734 "libxlu_cfg_l.c"
 
     yylval = yylval_param;
 
@@ -944,7 +930,7 @@ YY_RULE_SETUP
 #line 82 "libxlu_cfg_l.l"
 YY_FATAL_ERROR( "flex scanner jammed" );
 	YY_BREAK
-#line 948 "libxlu_cfg_l.c"
+#line 934 "libxlu_cfg_l.c"
 case YY_STATE_EOF(INITIAL):
 case YY_STATE_EOF(lexerr):
 	yyterminate();
@@ -1687,8 +1673,8 @@ YY_BUFFER_STATE xlu__cfg_yy_scan_string 
 
 /** Setup the input buffer state to scan the given bytes. The next call to xlu__cfg_yylex() will
  * scan from a @e copy of @a bytes.
- * @param yybytes the byte buffer to scan
- * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
  * @param yyscanner The scanner object.
  * @return the newly allocated buffer state object.
  */
diff -r 3985fea87987 tools/libxl/libxlu_cfg_l.h
--- a/tools/libxl/libxlu_cfg_l.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.h	Wed Sep 15 13:14:35 2010 +0200
@@ -58,6 +58,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -87,8 +88,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -132,15 +131,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 #ifndef YY_TYPEDEF_YY_BUFFER_STATE
@@ -310,12 +301,7 @@ static int yy_flex_strlen (yyconst char 
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Number of entries by which start-condition stack grows. */
@@ -352,6 +338,6 @@ extern int xlu__cfg_yylex \
 
 #line 82 "libxlu_cfg_l.l"
 
-#line 356 "libxlu_cfg_l.h"
+#line 342 "libxlu_cfg_l.h"
 #undef xlu__cfg_yyIN_HEADER
 #endif /* xlu__cfg_yyHEADER_H */
diff -r 3985fea87987 tools/libxl/xl.h
--- a/tools/libxl/xl.h	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl.h	Wed Sep 15 13:14:35 2010 +0200
@@ -79,6 +79,12 @@ int main_network2attach(int argc, char *
 int main_network2attach(int argc, char **argv);
 int main_network2list(int argc, char **argv);
 int main_network2detach(int argc, char **argv);
+int main_poolcreate(int argc, char **argv);
+int main_poollist(int argc, char **argv);
+int main_pooldestroy(int argc, char **argv);
+int main_poolcpuadd(int argc, char **argv);
+int main_poolcpuremove(int argc, char **argv);
+int main_poolmigrate(int argc, char **argv);
 
 void help(char *command);
 
diff -r 3985fea87987 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Wed Sep 15 13:14:35 2010 +0200
@@ -420,7 +420,7 @@ static void printf_info(int domid,
     printf("\t(ssidref %d)\n", c_info->ssidref);
     printf("\t(name %s)\n", c_info->name);
     printf("\t(uuid " LIBXL_UUID_FMT ")\n", LIBXL_UUID_BYTES(c_info->uuid));
-    printf("\t(cpupool %s (%d))\n", c_info->poolname, c_info->poolid);
+    printf("\t(cpupool %s)\n", c_info->poolname);
     if (c_info->xsdata)
         printf("\t(xsdata contains data)\n");
     else
@@ -3283,7 +3283,7 @@ static void print_vcpuinfo(uint32_t tdom
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
     pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
-    for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+    for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
@@ -3298,7 +3298,7 @@ static void print_vcpuinfo(uint32_t tdom
     if (!nr_cpus) {
         printf("any cpu\n");
     } else {
-        for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+        for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
             pcpumap = *cpumap;
             for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
                 ;
@@ -3569,10 +3569,7 @@ static void output_xeninfo(void)
     printf("xen_minor              : %d\n", info->xen_version_minor);
     printf("xen_extra              : %s\n", info->xen_version_extra);
     printf("xen_caps               : %s\n", info->capabilities);
-    printf("xen_scheduler          : %s\n",
-        sched_id == XEN_SCHEDULER_SEDF ? "sedf" :
-        sched_id == XEN_SCHEDULER_CREDIT ? "credit" :
-        sched_id == XEN_SCHEDULER_CREDIT2 ? "credit2" : "unknown");
+    printf("xen_scheduler          : %s\n", libxl_schedid_to_name(&ctx, sched_id));
     printf("xen_pagesize           : %lu\n", info->pagesize);
     printf("platform_params        : virt_start=0x%lx\n", info->virt_start);
     printf("xen_changeset          : %s\n", info->changeset);
@@ -3603,6 +3600,8 @@ static void output_physinfo(void)
     libxl_physinfo info;
     const libxl_version_info *vinfo;
     unsigned int i;
+    libxl_cpumap cpumap;
+    int n = 0;
 
     if (libxl_get_physinfo(&ctx, &info) != 0) {
         fprintf(stderr, "libxl_physinfo failed.\n");
@@ -3629,6 +3628,13 @@ static void output_physinfo(void)
         printf("total_memory           : %"PRIu64"\n", info.total_pages / i);
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
+    if (!libxl_get_freecpus(&ctx, &cpumap)) {
+        for (i = 0; i < cpumap.size * 8; i++)
+            if (cpumap.map[i / 64] & ((uint64_t)1 << (i % 64)))
+                n++;
+        printf("free_cpus              : %d\n", n);
+        free(cpumap.map);
+    }
 
     return;
 }
@@ -5044,3 +5050,444 @@ int main_tmem_freeable(int argc, char **
     printf("%d\n", mb);
     return 0;
 }
+
+int main_poolcreate(int argc, char **argv)
+{
+    char *filename = NULL;
+    char *p, extra_config[1024];
+    int dryrun = 0;
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"defconfig", 1, 0, 'f'},
+        {"dryrun", 0, 0, 'n'},
+        {0, 0, 0, 0}
+    };
+    int ret;
+    void *config_data = 0;
+    int config_len = 0;
+    XLU_Config *config;
+    const char *buf;
+    char *name, *sched;
+    uint32_t poolid;
+    int schedid = -1;
+    XLU_ConfigList *cpus;
+    int n_cpus, i, n;
+    libxl_cpumap freemap;
+    libxl_cpumap cpumap;
+    libxl_uuid uuid;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hnf:", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'f':
+            filename = optarg;
+            break;
+        case 'h':
+            help("pool-create");
+            return 0;
+        case 'n':
+            dryrun = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    memset(extra_config, 0, sizeof(extra_config));
+    while (optind < argc) {
+        if ((p = strchr(argv[optind], '='))) {
+            if (strlen(extra_config) + 1 < sizeof(extra_config)) {
+                if (strlen(extra_config))
+                    strcat(extra_config, "\n");
+                strcat(extra_config, argv[optind]);
+            }
+        } else if (!filename) {
+            filename = argv[optind];
+        } else {
+            help("pool-create");
+            return -ERROR_FAIL;
+        }
+        optind++;
+    }
+
+    if (!filename) {
+        help("pool-create");
+        return -ERROR_FAIL;
+    }
+
+    if (libxl_read_file_contents(&ctx, filename, &config_data, &config_len)) {
+        fprintf(stderr, "Failed to read config file: %s: %s\n",
+                filename, strerror(errno));
+        return -ERROR_FAIL;
+    }
+    if (strlen(extra_config)) {
+        if (config_len > INT_MAX - (strlen(extra_config) + 2)) {
+            fprintf(stderr, "Failed to attach extra configuration\n");
+            return -ERROR_FAIL;
+        }
+        config_data = realloc(config_data,
+                              config_len + strlen(extra_config) + 2);
+        if (!config_data) {
+            fprintf(stderr, "Failed to realloc config_data\n");
+            return -ERROR_FAIL;
+        }
+        strcat(config_data, "\n");
+        strcat(config_data, extra_config);
+        strcat(config_data, "\n");
+        config_len += (strlen(extra_config) + 2);
+    }
+
+    config = xlu_cfg_init(stderr, filename);
+    if (!config) {
+        fprintf(stderr, "Failed to allocate for configuration\n");
+        return -ERROR_FAIL;
+    }
+
+    ret = xlu_cfg_readdata(config, config_data, config_len);
+    if (ret) {
+        fprintf(stderr, "Failed to parse config file: %s\n", strerror(ret));
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "name", &buf))
+        name = strdup(buf);
+    else
+        name = basename(filename);
+    if (!libxl_name_to_poolid(&ctx, name, &poolid)) {
+        fprintf(stderr, "Pool name \"%s\" already exists\n", name);
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "sched", &buf)) {
+        if ((schedid = libxl_name_to_schedid(&ctx, buf)) < 0) {
+            fprintf(stderr, "Unknown scheduler\n");
+            return -ERROR_FAIL;
+        }
+    } else {
+        if ((schedid = libxl_get_sched_id(&ctx)) < 0) {
+            fprintf(stderr, "get_sched_id sysctl failed.\n");
+            return -ERROR_FAIL;
+        }
+    }
+    sched = libxl_schedid_to_name(&ctx, schedid);
+
+    if (libxl_get_freecpus(&ctx, &freemap)) {
+        fprintf(stderr, "libxl_get_freecpus failed\n");
+        return -ERROR_FAIL;
+    }
+    cpumap.map = calloc(freemap.size, 1);
+    if (!cpumap.map) {
+        fprintf(stderr, "Failed to allocate cpumap\n");
+        return -ERROR_FAIL;
+    }
+    cpumap.size = freemap.size;
+    if (!xlu_cfg_get_list(config, "cpus", &cpus, 0)) {
+        n_cpus = 0;
+        while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
+            i = atoi(buf);
+            if ((i < 0) || (i >= freemap.size * 8) ||
+                !(freemap.map[i / 64] & (1L << (i % 64)))) {
+                fprintf(stderr, "cpu %d illegal or not free\n", i);
+                return -ERROR_FAIL;
+            }
+            cpumap.map[i / 64] |= 1L << (i % 64);
+            n_cpus++;
+        }
+    } else {
+        n_cpus = 1;
+        n = 0;
+        for (i = 0; i < freemap.size * 8; i++)
+            if (freemap.map[i / 64] & (1L << (i % 64))) {
+                n++;
+                cpumap.map[i / 64] = 1L << (i % 64);
+                break;
+            }
+        if (n != n_cpus) {
+            fprintf(stderr, "no free cpu found\n");
+            return -ERROR_FAIL;
+        }
+    }
+
+    libxl_uuid_generate(&uuid);
+
+    printf("Using config file \"%s\"\n", filename);
+    printf("pool name:      %s\n", name);
+    printf("scheduler:      %s\n", sched);
+    printf("number of cpus: %d\n", n_cpus);
+
+    if (dryrun)
+        return 0;
+
+    poolid = 0;
+    if (libxl_create_cpupool(&ctx, name, schedid, cpumap, &uuid, &poolid)) {
+        fprintf(stderr, "error on creating cpupool\n");
+        return -ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int main_poollist(int argc, char **argv)
+{
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"long", 0, 0, 'l'},
+        {"cpus", 0, 0, 'c'},
+        {0, 0, 0, 0}
+    };
+    int opt_long = 0;
+    int opt_cpus = 0;
+    char *pool = NULL;
+    libxl_poolinfo *poolinfo;
+    int n_pools, p, c, n;
+    uint32_t poolid;
+    char *name;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hlc", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'h':
+            help("pool-list");
+            return 0;
+        case 'l':
+            opt_long = 1;
+            break;
+        case 'c':
+            opt_cpus = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    if ((optind + 1) < argc) {
+        help("pool-list");
+        return -ERROR_FAIL;
+    }
+    if (optind < argc) {
+        pool = argv[optind];
+        if (libxl_name_to_poolid(&ctx, pool, &poolid)) {
+            fprintf(stderr, "Pool \'%s\' does not exist\n", pool);
+            return -ERROR_FAIL;
+        }
+    }
+
+    poolinfo = libxl_list_pool(&ctx, &n_pools);
+    if (!poolinfo) {
+        fprintf(stderr, "error getting cpupool info\n");
+        return -ERROR_NOMEM;
+    }
+
+    if (!opt_long) {
+        printf("%-19s", "Name");
+        if (opt_cpus)
+            printf("CPU list\n");
+        else
+            printf("CPUs   Sched     Active   Domain count\n");
+    }
+
+    for (p = 0; p < n_pools; p++) {
+        if (pool && (poolinfo[p].poolid != poolid))
+            continue;
+        name = libxl_poolid_to_name(&ctx, poolinfo[p].poolid);
+        if (!name) {
+            fprintf(stderr, "error getting cpupool info\n");
+            return -ERROR_NOMEM;
+        }
+        if (opt_long) {
+            return -ERROR_NI;
+        } else {
+            printf("%-19s", name);
+            n = 0;
+            for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
+                if (poolinfo[p].cpumap.map[c / 64] & (1L << (c % 64))) {
+                    if (n && opt_cpus) printf(",");
+                    if (opt_cpus) printf("%d", c);
+                    n++;
+                }
+            if (!opt_cpus) {
+                printf("%3d %9s       y       %4d", n,
+                       libxl_schedid_to_name(&ctx, poolinfo[p].sched_id),
+                       poolinfo[p].n_dom);
+            }
+            printf("\n");
+        }
+    }
+
+    return 0;
+}
+
+/* xl pool-destroy: deactivate a CPU pool.
+ * Usage: xl pool-destroy <CPU Pool>
+ * The pool may be given by name or numeric id; returns 0 on success,
+ * -ERROR_FAIL on bad usage or unknown pool, or the negated libxl error.
+ */
+int main_pooldestroy(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    char *name;
+    uint32_t poolid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-destroy");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    /* argv[argc] is NULL (C99 5.1.2.2.1), so a missing operand reads as NULL */
+    pool = argv[optind];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-destroy");
+        return -ERROR_FAIL;
+    }
+
+    /* libxl_poolid_to_name() is used only as an existence check; it returns
+     * a malloc'ed string which must be freed (was leaked before). */
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !(name = libxl_poolid_to_name(&ctx, poolid))) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+    free(name);
+
+    return -libxl_destroy_cpupool(&ctx, poolid);
+}
+
+/* xl pool-cpu-add: add a physical CPU to a CPU pool.
+ * Usage: xl pool-cpu-add <CPU Pool> <CPU nr>
+ * Returns 0 on success, -ERROR_FAIL on bad usage or unknown pool,
+ * or the negated libxl error from the add operation.
+ */
+int main_poolcpuadd(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    char *name;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-add");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+    /* NOTE(review): atoi() silently yields 0 on non-numeric input; consider
+     * strtol() with error checking if cpu 0 must not be a fallback. */
+    cpu = atoi(argv[optind]);
+
+    /* existence check; free the malloc'ed name (was leaked before) */
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !(name = libxl_poolid_to_name(&ctx, poolid))) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+    free(name);
+
+    return -libxl_cpupool_cpuadd(&ctx, poolid, cpu);
+}
+
+/* xl pool-cpu-remove: remove a physical CPU from a CPU pool.
+ * Usage: xl pool-cpu-remove <CPU Pool> <CPU nr>
+ * Returns 0 on success, -ERROR_FAIL on bad usage or unknown pool,
+ * or the negated libxl error from the remove operation.
+ */
+int main_poolcpuremove(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    char *name;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-remove");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+    /* NOTE(review): atoi() silently yields 0 on non-numeric input; consider
+     * strtol() with error checking if cpu 0 must not be a fallback. */
+    cpu = atoi(argv[optind]);
+
+    /* existence check; free the malloc'ed name (was leaked before) */
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !(name = libxl_poolid_to_name(&ctx, poolid))) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+    free(name);
+
+    return -libxl_cpupool_cpuremove(&ctx, poolid, cpu);
+}
+
+/* xl pool-migrate: move a domain into a CPU pool.
+ * Usage: xl pool-migrate <Domain> <CPU Pool>
+ * Both operands may be names or numeric ids. Returns 0 on success,
+ * -ERROR_FAIL on bad usage / unknown domain or pool, or the negated
+ * libxl error from the move operation.
+ */
+int main_poolmigrate(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    char *dom;
+    uint32_t domid;
+    char *name;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-migrate");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    dom = argv[optind++];
+    if (!dom) {
+        fprintf(stderr, "no domain specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    /* existence checks; both *_to_name() results are malloc'ed and must be
+     * freed (both were leaked before) */
+    if (domain_qualifier_to_domid(dom, &domid, NULL) ||
+        !(name = libxl_domid_to_name(&ctx, domid))) {
+        fprintf(stderr, "unknown domain \'%s\'\n", dom);
+        return -ERROR_FAIL;
+    }
+    free(name);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !(name = libxl_poolid_to_name(&ctx, poolid))) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+    free(name);
+
+    return -libxl_cpupool_movedomain(&ctx, poolid, domid);
+}
diff -r 3985fea87987 tools/libxl/xl_cmdtable.c
--- a/tools/libxl/xl_cmdtable.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/libxl/xl_cmdtable.c	Wed Sep 15 13:14:35 2010 +0200
@@ -338,6 +338,41 @@ struct cmd_spec cmd_table[] = {
       "destroy a domain's version 2 virtual network device",
       "<Domain> <DevId>",
     },
+    { "pool-create",
+      &main_poolcreate,
+      "Create a CPU pool based an ConfigFile",
+      "[options] <ConfigFile> [vars]",
+      "-h, --help                   Print this help.\n"
+      "-f=FILE, --defconfig=FILE    Use the given configuration file.\n"
+      "-n, --dryrun                 Dry run - prints the resulting configuration."
+    },
+    { "pool-list",
+      &main_poollist,
+      "List CPU pools on host",
+      "[-l|--long] [-c|--cpus] [<CPU Pool>]",
+      "-l, --long                     Output all CPU pool details.\n"
+      "-c, --cpus                     Output list of CPUs used by a pool"
+    },
+    { "pool-destroy",
+      &main_pooldestroy,
+      "Deactivates a CPU pool",
+      "<CPU Pool>",
+    },
+    { "pool-cpu-add",
+      &main_poolcpuadd,
+      "Adds a CPU to a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-cpu-remove",
+      &main_poolcpuremove,
+      "Removes a CPU from a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-migrate",
+      &main_poolmigrate,
+      "Moves a domain into a CPU pool",
+      "<Domain> <CPU Pool>",
+    },
 };
 
 int cmdtable_len = sizeof(cmd_table)/sizeof(struct cmd_spec);
diff -r 3985fea87987 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 10 19:06:33 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Sep 15 13:14:35 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
   
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_cpu_id + 1;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
 
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
         return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_cpu_id + 1;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1L << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                       &first_pool, &max_pools) )
         return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = -ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              (physinfo.max_cpu_id + 8) / 8);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15 11:28             ` Juergen Gross
@ 2010-09-15 15:35               ` Ian Campbell
  2010-09-16  5:48                 ` Juergen Gross
  0 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-15 15:35 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 12:28 +0100, Juergen Gross wrote:
> On 09/15/10 12:22, Ian Campbell wrote:
> > On Wed, 2010-09-15 at 10:37 +0100, Ian Campbell wrote:
> >> On Wed, 2010-09-15 at 10:23 +0100, Juergen Gross wrote:
> >>>
> >>> And I'm not sure I'll get the generating of the bindings correctly.
> >>> So yes, please do it!
> >>
> >> Will do.
> >>
> >> From looking at your patch it seems that the intention is that the size
> >> is the number of bytes in the map, rather than the number of CPUs which
> >> can be represented or the number of uint64_t's, is that right and/or
> >> what you would like to use?
> >
> > Seems like libxc counts bytes but libxl (with your patch) counts 64 bit
> > words.
> >
> > There doesn't seem to be any users of the size currently so I went with
> > bytes in the below (compile tested + examined generated code only) but
> > since your patch adds the first actual user feel free to change it if
> > that makes the libxl interface more suitable for your needs or whatever.
> 
> Thanks, updated patch is attached.
> I didn't add the destructor, because this would have made things much more
> complicated as libxl_list_pool returns an array of libxl_poolinfo elements.
> Doing only one free() at caller side is much easier than cycling through
> the elements and free-ing all extra allocated cpumaps.

I'm afraid I think you do need to cycle through the elements calling
libxl_poolinfo_destroy on each, this is consistent with how all other
libxl types are handled and means that users of the library are isolated
from a certain amount of change to the underlying types.

Often you can destroy each element as you walk over the list doing
whatever it is you needed the list for, for example see how
xl_cmdimpl.c:print_domain_vcpuinfo does it. libxl_list_vcpu has
basically the same semantics as libxl_list_pool should have.

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15 15:35               ` Ian Campbell
@ 2010-09-16  5:48                 ` Juergen Gross
  2010-09-16 17:24                   ` Ian Jackson
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-16  5:48 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel

[-- Attachment #1: Type: text/plain, Size: 981 bytes --]

On 09/15/10 17:35, Ian Campbell wrote:
> I'm afraid I think you do need to cycle through the elements calling
> libxl_poolinfo_destroy on each, this is consistent with how all other
> libxl types are handled and means that users of the library are isolated
> from a certain amount of change to the underlying types.
>
> Often you can destroy each element as you walk over the list doing
> whatever it is you needed the list for, for example see how
> xl_cmdimpl.c:print_domain_vcpuinfo does it. libxl_list_vcpu has
> basically the same semantics as libxl_list_pool should have.

Okay, new patch attached.


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

[-- Attachment #2: xl-pools.patch --]
[-- Type: text/x-patch, Size: 47064 bytes --]

Signed-off-by: juergen.gross@ts.fujitsu.com

diff -r 5393151a737b tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxc/xc_cpupool.c	Thu Sep 16 07:46:23 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int cpumap_size = 0;
+    xc_physinfo_t physinfo;
+
+    if ( cpumap_size )
+        return cpumap_size;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+    return cpumap_size;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +78,56 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t first_poolid,
-                       uint32_t n_max, 
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    cpumap_size = (local_size + 7) / 8;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@ int xc_cpupool_movedomain(xc_interface *
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
     DECLARE_SYSCTL;
+
+    local = malloc(cpusize);
+    if (local == NULL)
+        return -ENOMEM;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    free(local);
 
     if (err < 0)
         return err;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
     return 0;
 }
diff -r 5393151a737b tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxc/xenctrl.h	Thu Sep 16 07:46:23 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
  */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
  * return 0 on success, -1 on failure
  */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r 5393151a737b tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.c	Thu Sep 16 07:46:23 2010 +0200
@@ -594,9 +594,19 @@ libxl_poolinfo * libxl_list_pool(libxl_c
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
+    int i, m;
+    xc_cpupoolinfo_t *info;
+    int size;
+    uint32_t poolid;
+    libxl_physinfo physinfo;
+    int mapsize;
+
+    if (libxl_get_physinfo(ctx, &physinfo) != 0) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting phys info");
+        return NULL;
+    }
+    mapsize = (physinfo.max_cpu_id + 64) / 64;
+    size = physinfo.max_cpu_id + 32;
 
     ptr = calloc(size, sizeof(libxl_poolinfo));
     if (!ptr) {
@@ -604,16 +614,25 @@ libxl_poolinfo * libxl_list_pool(libxl_c
         return NULL;
     }
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
+    poolid = 0;
+    for (i = 0; i < size; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        ptr[i].poolid = info->cpupool_id;
+        ptr[i].sched_id = info->sched_id;
+        ptr[i].n_dom = info->n_dom;
+        ptr[i].cpumap.size = mapsize * sizeof(*ptr[i].cpumap.map);
+        ptr[i].cpumap.map = calloc(mapsize, sizeof(*ptr[i].cpumap.map));
+        if (!ptr[i].cpumap.map)
+            break;
+        for (m = 0; m < mapsize; m++)
+            ptr[i].cpumap.map[m] = info->cpumap[m];
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
 
@@ -2927,8 +2946,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
 
     num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
-        if (!ptr->cpumap) {
+        ptr->cpumap.size = num_cpuwords * sizeof(*ptr->cpumap.map);
+        ptr->cpumap.map = malloc(ptr->cpumap.size);
+        if (!ptr->cpumap.map) {
             return NULL;
         }
         if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) {
@@ -2936,7 +2956,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
             return NULL;
         }
         if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-            ptr->cpumap, ((*nrcpus) + 7) / 8) == -1) {
+            ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -3340,3 +3360,182 @@ void libxl_file_reference_destroy(libxl_
     libxl__file_reference_unmap(f);
     free(f->path);
 }
+
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap)
+{
+    libxl_physinfo info;
+    int mapsize;
+
+    if (libxl_get_physinfo(ctx, &info) != 0)
+        return ERROR_FAIL;
+
+    mapsize = (info.max_cpu_id + 64) / 64;
+    cpumap->map = calloc(mapsize, sizeof(*cpumap->map));
+    if (!cpumap->map)
+        return ERROR_NOMEM;
+    cpumap->size = mapsize * sizeof(*cpumap->map);
+
+    if (xc_cpupool_freeinfo(ctx->xch, cpumap->map, cpumap->size)) {
+        free(cpumap->map);
+        cpumap->map = NULL;
+        return ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    int i;
+    xs_transaction_t t;
+    char *uuid_string;
+
+    uuid_string = libxl__uuid2string(&gc, *uuid);
+    if (!uuid_string)
+        return ERROR_NOMEM;
+
+    rc = xc_cpupool_create(ctx->xch, poolid, schedid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+           "Could not create cpupool");
+        return ERROR_FAIL;
+    }
+
+    for (i = 0; i < cpumap.size * 8; i++)
+        if (cpumap.map[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error moving cpu to cpupool");
+                return ERROR_FAIL;
+            }
+        }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_mkdir(ctx->xsh, t, libxl__sprintf(&gc, "/local/pool/%d", *poolid));
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/uuid", *poolid),
+                 uuid_string);
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/name", *poolid),
+                 name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            return 0;
+    }
+}
+
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc, i;
+    xc_cpupoolinfo_t *info;
+    xs_transaction_t t;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL)
+        return ERROR_NOMEM;
+
+    rc = ERROR_INVAL;
+    if ((info->cpupool_id != poolid) || (info->n_dom))
+        goto out;
+
+    for (i = 0; i < info->cpumap_size; i++)
+        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error removing cpu from cpupool");
+                rc = ERROR_FAIL;
+                goto out;
+            }
+        }
+
+    rc = xc_cpupool_destroy(ctx->xch, poolid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_rm(ctx->xsh, XBT_NULL, libxl__sprintf(&gc, "/local/pool/%d", poolid));
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    rc = 0;
+
+out:
+    free(info);
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving cpu to cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error removing cpu from cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    char *dom_path;
+    char *vm_path;
+    char *poolname;
+    xs_transaction_t t;
+
+    dom_path = libxl__xs_get_dompath(&gc, domid);
+    if (!dom_path) {
+        return ERROR_FAIL;
+    }
+
+    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving domain to cpupool");
+        return ERROR_FAIL;
+    }
+
+    poolname = libxl__poolid_to_name(&gc, poolid);
+    vm_path = libxl__xs_read(&gc, XBT_NULL, libxl__sprintf(&gc, "%s/vm", dom_path));
+    for (; vm_path;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "%s/pool_name", vm_path), poolname);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    return 0;
+}
diff -r 5393151a737b tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.h	Thu Sep 16 07:46:23 2010 +0200
@@ -138,8 +138,6 @@ typedef char **libxl_string_list;
 typedef char **libxl_string_list;
 
 typedef char **libxl_key_value_list;
-
-typedef uint64_t *libxl_cpumap;
 
 typedef uint32_t libxl_hwcap[8];
 
@@ -471,6 +469,15 @@ int libxl_device_net2_del(libxl_ctx *ctx
 int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
                           int wait);
 
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap);
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid);
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+
 /* common paths */
 const char *libxl_sbindir_path(void);
 const char *libxl_bindir_path(void);
diff -r 5393151a737b tools/libxl/libxl.idl
--- a/tools/libxl/libxl.idl	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.idl	Thu Sep 16 07:46:23 2010 +0200
@@ -14,8 +14,6 @@ libxl_nic_type = Builtin("nic_type")
 
 libxl_string_list = Builtin("string_list", destructor_fn="libxl_string_list_destroy", passby=PASS_BY_REFERENCE)
 libxl_key_value_list = Builtin("key_value_list", destructor_fn="libxl_key_value_list_destroy", passby=PASS_BY_REFERENCE)
-
-libxl_cpumap = Builtin("cpumap", destructor_fn="free")
 
 libxl_hwcap = Builtin("hwcap")
 
@@ -42,9 +40,17 @@ SHUTDOWN_* constant."""),
     ("vcpu_online", uint32),
     ], destructor_fn=None)
 
+libxl_cpumap = Struct("cpumap", [
+    ("size", uint32),
+    ("map",  Reference(uint64)),
+    ])
+                      
 libxl_poolinfo = Struct("poolinfo", [
-    ("poolid", uint32)
-    ], destructor_fn=None)
+    ("poolid",      uint32),
+    ("sched_id",    uint32),
+    ("n_dom",       uint32),
+    ("cpumap",      libxl_cpumap)
+    ], destructor_fn="libxl_poolinfo_destroy")
 
 libxl_vminfo = Struct("vminfo", [
     ("uuid", libxl_uuid),
diff -r 5393151a737b tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl_utils.c	Thu Sep 16 07:46:23 2010 +0200
@@ -31,6 +31,17 @@
 #include "libxl_utils.h"
 #include "libxl_internal.h"
 
+struct schedid_name {
+    char *name;
+    int id;
+};
+
+static struct schedid_name schedid_name[] = {
+    { "credit", XEN_SCHEDULER_CREDIT },
+    { "sedf", XEN_SCHEDULER_SEDF },
+    { "credit2", XEN_SCHEDULER_CREDIT2 },
+    { NULL, -1 }
+};
 
 unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus)
 {
@@ -124,19 +135,40 @@ int libxl_name_to_poolid(libxl_ctx *ctx,
         return ERROR_NOMEM;
 
     for (i = 0; i < nb_pools; i++) {
-        poolname = libxl_poolid_to_name(ctx, poolinfo[i].poolid);
-        if (!poolname)
-            continue;
-        if (strcmp(poolname, name) == 0) {
-            *poolid = poolinfo[i].poolid;
-            ret = 0;
+        if (ret && ((poolname = libxl_poolid_to_name(ctx,
+            poolinfo[i].poolid)) != NULL)) {
+            if (strcmp(poolname, name) == 0) {
+                *poolid = poolinfo[i].poolid;
+                ret = 0;
+            }
             free(poolname);
-            break;
         }
-        free(poolname);
+        libxl_poolinfo_destroy(poolinfo + i);
     }
     free(poolinfo);
     return ret;
+}
+
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (strcmp(name, schedid_name[i].name) == 0)
+            return schedid_name[i].id;
+
+    return -1;
+}
+
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (schedid_name[i].id == schedid)
+            return schedid_name[i].name;
+
+    return "unknown";
 }
 
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid)
diff -r 5393151a737b tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl_utils.h	Thu Sep 16 07:46:23 2010 +0200
@@ -23,6 +23,8 @@ char *libxl_domid_to_name(libxl_ctx *ctx
 char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
 int libxl_name_to_poolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
 char *libxl_poolid_to_name(libxl_ctx *ctx, uint32_t poolid);
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name);
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid);
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
 int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
 int libxl_create_logfile(libxl_ctx *ctx, char *name, char **full_name);
diff -r 5393151a737b tools/libxl/libxltypes.py
--- a/tools/libxl/libxltypes.py	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxltypes.py	Thu Sep 16 07:46:23 2010 +0200
@@ -119,7 +119,10 @@ class Reference(Type):
         kwargs.setdefault('passby', PASS_BY_VALUE)
         
         kwargs.setdefault('namespace', ty.namespace)
-        typename = ty.typename[len(kwargs['namespace']):]
+
+        typename = ty.typename
+        if ty.namespace:
+            typename = typename[len(kwargs['namespace']):]
         Type.__init__(self, typename + " *", **kwargs)
 
 #
diff -r 5393151a737b tools/libxl/libxlu_cfg_l.c
--- a/tools/libxl/libxlu_cfg_l.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.c	Thu Sep 16 07:46:23 2010 +0200
@@ -54,6 +54,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -83,8 +84,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -159,15 +158,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -496,7 +487,7 @@ void xlu__cfg_yyset_column(int  column_n
 void xlu__cfg_yyset_column(int  column_no, yyscan_t yyscanner);
 
 
-#line 500 "libxlu_cfg_l.c"
+#line 491 "libxlu_cfg_l.c"
 
 #define INITIAL 0
 #define lexerr 1
@@ -632,12 +623,7 @@ static int input (yyscan_t yyscanner );
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -645,7 +631,7 @@ static int input (yyscan_t yyscanner );
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
+#define ECHO fwrite( yytext, yyleng, 1, yyout )
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -656,7 +642,7 @@ static int input (yyscan_t yyscanner );
 	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
 		{ \
 		int c = '*'; \
-		size_t n; \
+		int n; \
 		for ( n = 0; n < max_size && \
 			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
 			buf[n] = (char) c; \
@@ -744,7 +730,7 @@ YY_DECL
 #line 37 "libxlu_cfg_l.l"
 
 
-#line 748 "libxlu_cfg_l.c"
+#line 734 "libxlu_cfg_l.c"
 
     yylval = yylval_param;
 
@@ -944,7 +930,7 @@ YY_RULE_SETUP
 #line 82 "libxlu_cfg_l.l"
 YY_FATAL_ERROR( "flex scanner jammed" );
 	YY_BREAK
-#line 948 "libxlu_cfg_l.c"
+#line 934 "libxlu_cfg_l.c"
 case YY_STATE_EOF(INITIAL):
 case YY_STATE_EOF(lexerr):
 	yyterminate();
@@ -1687,8 +1673,8 @@ YY_BUFFER_STATE xlu__cfg_yy_scan_string 
 
 /** Setup the input buffer state to scan the given bytes. The next call to xlu__cfg_yylex() will
  * scan from a @e copy of @a bytes.
- * @param yybytes the byte buffer to scan
- * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
  * @param yyscanner The scanner object.
  * @return the newly allocated buffer state object.
  */
diff -r 5393151a737b tools/libxl/libxlu_cfg_l.h
--- a/tools/libxl/libxlu_cfg_l.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.h	Thu Sep 16 07:46:23 2010 +0200
@@ -58,6 +58,7 @@ typedef unsigned char flex_uint8_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -87,8 +88,6 @@ typedef unsigned int flex_uint32_t;
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -132,15 +131,7 @@ typedef void* yyscan_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 #ifndef YY_TYPEDEF_YY_BUFFER_STATE
@@ -310,12 +301,7 @@ static int yy_flex_strlen (yyconst char 
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Number of entries by which start-condition stack grows. */
@@ -352,6 +338,6 @@ extern int xlu__cfg_yylex \
 
 #line 82 "libxlu_cfg_l.l"
 
-#line 356 "libxlu_cfg_l.h"
+#line 342 "libxlu_cfg_l.h"
 #undef xlu__cfg_yyIN_HEADER
 #endif /* xlu__cfg_yyHEADER_H */
diff -r 5393151a737b tools/libxl/xl.h
--- a/tools/libxl/xl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl.h	Thu Sep 16 07:46:23 2010 +0200
@@ -79,6 +79,12 @@ int main_network2attach(int argc, char *
 int main_network2attach(int argc, char **argv);
 int main_network2list(int argc, char **argv);
 int main_network2detach(int argc, char **argv);
+int main_poolcreate(int argc, char **argv);
+int main_poollist(int argc, char **argv);
+int main_pooldestroy(int argc, char **argv);
+int main_poolcpuadd(int argc, char **argv);
+int main_poolcpuremove(int argc, char **argv);
+int main_poolmigrate(int argc, char **argv);
 
 void help(char *command);
 
diff -r 5393151a737b tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Thu Sep 16 07:46:23 2010 +0200
@@ -420,7 +420,7 @@ static void printf_info(int domid,
     printf("\t(ssidref %d)\n", c_info->ssidref);
     printf("\t(name %s)\n", c_info->name);
     printf("\t(uuid " LIBXL_UUID_FMT ")\n", LIBXL_UUID_BYTES(c_info->uuid));
-    printf("\t(cpupool %s (%d))\n", c_info->poolname, c_info->poolid);
+    printf("\t(cpupool %s)\n", c_info->poolname);
     if (c_info->xsdata)
         printf("\t(xsdata contains data)\n");
     else
@@ -3286,7 +3286,7 @@ static void print_vcpuinfo(uint32_t tdom
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
     pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
-    for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+    for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
@@ -3301,7 +3301,7 @@ static void print_vcpuinfo(uint32_t tdom
     if (!nr_cpus) {
         printf("any cpu\n");
     } else {
-        for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+        for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
             pcpumap = *cpumap;
             for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
                 ;
@@ -3572,10 +3572,7 @@ static void output_xeninfo(void)
     printf("xen_minor              : %d\n", info->xen_version_minor);
     printf("xen_extra              : %s\n", info->xen_version_extra);
     printf("xen_caps               : %s\n", info->capabilities);
-    printf("xen_scheduler          : %s\n",
-        sched_id == XEN_SCHEDULER_SEDF ? "sedf" :
-        sched_id == XEN_SCHEDULER_CREDIT ? "credit" :
-        sched_id == XEN_SCHEDULER_CREDIT2 ? "credit2" : "unknown");
+    printf("xen_scheduler          : %s\n", libxl_schedid_to_name(&ctx, sched_id));
     printf("xen_pagesize           : %lu\n", info->pagesize);
     printf("platform_params        : virt_start=0x%lx\n", info->virt_start);
     printf("xen_changeset          : %s\n", info->changeset);
@@ -3606,6 +3603,8 @@ static void output_physinfo(void)
     libxl_physinfo info;
     const libxl_version_info *vinfo;
     unsigned int i;
+    libxl_cpumap cpumap;
+    int n = 0;
 
     if (libxl_get_physinfo(&ctx, &info) != 0) {
         fprintf(stderr, "libxl_physinfo failed.\n");
@@ -3632,6 +3631,13 @@ static void output_physinfo(void)
         printf("total_memory           : %"PRIu64"\n", info.total_pages / i);
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
+    if (!libxl_get_freecpus(&ctx, &cpumap)) {
+        for (i = 0; i < cpumap.size * 8; i++)
+            if (cpumap.map[i / 64] & ((uint64_t)1 << (i % 64)))
+                n++;
+        printf("free_cpus              : %d\n", n);
+        free(cpumap.map);
+    }
 
     return;
 }
@@ -5047,3 +5053,446 @@ int main_tmem_freeable(int argc, char **
     printf("%d\n", mb);
     return 0;
 }
+
+int main_poolcreate(int argc, char **argv)
+{
+    char *filename = NULL;
+    char *p, extra_config[1024];
+    int dryrun = 0;
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"defconfig", 1, 0, 'f'},
+        {"dryrun", 0, 0, 'n'},
+        {0, 0, 0, 0}
+    };
+    int ret;
+    void *config_data = 0;
+    int config_len = 0;
+    XLU_Config *config;
+    const char *buf;
+    char *name, *sched;
+    uint32_t poolid;
+    int schedid = -1;
+    XLU_ConfigList *cpus;
+    int n_cpus, i, n;
+    libxl_cpumap freemap;
+    libxl_cpumap cpumap;
+    libxl_uuid uuid;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hnf:", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'f':
+            filename = optarg;
+            break;
+        case 'h':
+            help("pool-create");
+            return 0;
+        case 'n':
+            dryrun = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    memset(extra_config, 0, sizeof(extra_config));
+    while (optind < argc) {
+        if ((p = strchr(argv[optind], '='))) {
+            if (strlen(extra_config) + 1 < sizeof(extra_config)) {
+                if (strlen(extra_config))
+                    strcat(extra_config, "\n");
+                strcat(extra_config, argv[optind]);
+            }
+        } else if (!filename) {
+            filename = argv[optind];
+        } else {
+            help("pool-create");
+            return -ERROR_FAIL;
+        }
+        optind++;
+    }
+
+    if (!filename) {
+        help("pool-create");
+        return -ERROR_FAIL;
+    }
+
+    if (libxl_read_file_contents(&ctx, filename, &config_data, &config_len)) {
+        fprintf(stderr, "Failed to read config file: %s: %s\n",
+                filename, strerror(errno));
+        return -ERROR_FAIL;
+    }
+    if (strlen(extra_config)) {
+        if (config_len > INT_MAX - (strlen(extra_config) + 2)) {
+            fprintf(stderr, "Failed to attach extra configuration\n");
+            return -ERROR_FAIL;
+        }
+        config_data = realloc(config_data,
+                              config_len + strlen(extra_config) + 2);
+        if (!config_data) {
+            fprintf(stderr, "Failed to realloc config_data\n");
+            return -ERROR_FAIL;
+        }
+        strcat(config_data, "\n");
+        strcat(config_data, extra_config);
+        strcat(config_data, "\n");
+        config_len += (strlen(extra_config) + 2);
+    }
+
+    config = xlu_cfg_init(stderr, filename);
+    if (!config) {
+        fprintf(stderr, "Failed to allocate for configuration\n");
+        return -ERROR_FAIL;
+    }
+
+    ret = xlu_cfg_readdata(config, config_data, config_len);
+    if (ret) {
+        fprintf(stderr, "Failed to parse config file: %s\n", strerror(ret));
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "name", &buf))
+        name = strdup(buf);
+    else
+        name = basename(filename);
+    if (!libxl_name_to_poolid(&ctx, name, &poolid)) {
+        fprintf(stderr, "Pool name \"%s\" already exists\n", name);
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "sched", &buf)) {
+        if ((schedid = libxl_name_to_schedid(&ctx, buf)) < 0) {
+            fprintf(stderr, "Unknown scheduler\n");
+            return -ERROR_FAIL;
+        }
+    } else {
+        if ((schedid = libxl_get_sched_id(&ctx)) < 0) {
+            fprintf(stderr, "get_sched_id sysctl failed.\n");
+            return -ERROR_FAIL;
+        }
+    }
+    sched = libxl_schedid_to_name(&ctx, schedid);
+
+    if (libxl_get_freecpus(&ctx, &freemap)) {
+        fprintf(stderr, "libxl_get_freecpus failed\n");
+        return -ERROR_FAIL;
+    }
+    cpumap.map = calloc(freemap.size, 1);
+    if (!cpumap.map) {
+        fprintf(stderr, "Failed to allocate cpumap\n");
+        return -ERROR_FAIL;
+    }
+    cpumap.size = freemap.size;
+    if (!xlu_cfg_get_list(config, "cpus", &cpus, 0)) {
+        n_cpus = 0;
+        while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
+            i = atoi(buf);
+            if ((i < 0) || (i >= freemap.size * 8) ||
+                !(freemap.map[i / 64] & ((uint64_t)1 << (i % 64)))) {
+                fprintf(stderr, "cpu %d illegal or not free\n", i);
+                return -ERROR_FAIL;
+            }
+            cpumap.map[i / 64] |= (uint64_t)1 << (i % 64);
+            n_cpus++;
+        }
+    } else {
+        n_cpus = 1;
+        n = 0;
+        for (i = 0; i < freemap.size * 8; i++)
+            if (freemap.map[i / 64] & ((uint64_t)1 << (i % 64))) {
+                n++;
+                cpumap.map[i / 64] = (uint64_t)1 << (i % 64);
+                break;
+            }
+        if (n != n_cpus) {
+            fprintf(stderr, "no free cpu found\n");
+            return -ERROR_FAIL;
+        }
+    }
+
+    libxl_uuid_generate(&uuid);
+
+    printf("Using config file \"%s\"\n", filename);
+    printf("pool name:      %s\n", name);
+    printf("scheduler:      %s\n", sched);
+    printf("number of cpus: %d\n", n_cpus);
+
+    if (dryrun)
+        return 0;
+
+    poolid = 0;
+    if (libxl_create_cpupool(&ctx, name, schedid, cpumap, &uuid, &poolid)) {
+        fprintf(stderr, "error on creating cpupool\n");
+        return -ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int main_poollist(int argc, char **argv)
+{
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"long", 0, 0, 'l'},
+        {"cpus", 0, 0, 'c'},
+        {0, 0, 0, 0}
+    };
+    int opt_long = 0;
+    int opt_cpus = 0;
+    char *pool = NULL;
+    libxl_poolinfo *poolinfo;
+    int n_pools, p, c, n;
+    uint32_t poolid;
+    char *name;
+    int ret = 0;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hlc", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'h':
+            help("pool-list");
+            return 0;
+        case 'l':
+            opt_long = 1;
+            break;
+        case 'c':
+            opt_cpus = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    if ((optind + 1) < argc) {
+        help("pool-list");
+        return -ERROR_FAIL;
+    }
+    if (optind < argc) {
+        pool = argv[optind];
+        if (libxl_name_to_poolid(&ctx, pool, &poolid)) {
+            fprintf(stderr, "Pool \'%s\' does not exist\n", pool);
+            return -ERROR_FAIL;
+        }
+    }
+
+    poolinfo = libxl_list_pool(&ctx, &n_pools);
+    if (!poolinfo) {
+        fprintf(stderr, "error getting cpupool info\n");
+        return -ERROR_NOMEM;
+    }
+
+    if (!opt_long) {
+        printf("%-19s", "Name");
+        if (opt_cpus)
+            printf("CPU list\n");
+        else
+            printf("CPUs   Sched     Active   Domain count\n");
+    }
+
+    for (p = 0; p < n_pools; p++) {
+        if (!ret && (!pool || (poolinfo[p].poolid == poolid))) {
+            name = libxl_poolid_to_name(&ctx, poolinfo[p].poolid);
+            if (!name) {
+                fprintf(stderr, "error getting cpupool info\n");
+                ret = -ERROR_NOMEM;
+            }
+            else if (opt_long) {
+                ret = -ERROR_NI;
+            } else {
+                printf("%-19s", name);
+                n = 0;
+                for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
+                    if (poolinfo[p].cpumap.map[c / 64] & ((uint64_t)1 << (c % 64))) {
+                        if (n && opt_cpus) printf(",");
+                        if (opt_cpus) printf("%d", c);
+                        n++;
+                    }
+                if (!opt_cpus) {
+                    printf("%3d %9s       y       %4d", n,
+                           libxl_schedid_to_name(&ctx, poolinfo[p].sched_id),
+                           poolinfo[p].n_dom);
+                }
+                printf("\n");
+            }
+        }
+        libxl_poolinfo_destroy(poolinfo + p);
+    }
+
+    return ret;
+}
+
+int main_pooldestroy(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-destroy");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-destroy");
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_destroy_cpupool(&ctx, poolid);
+}
+
+int main_poolcpuadd(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-add");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuadd(&ctx, poolid, cpu);
+}
+
+int main_poolcpuremove(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-remove");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuremove(&ctx, poolid, cpu);
+}
+
+int main_poolmigrate(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    char *dom;
+    uint32_t domid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-migrate");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    dom = argv[optind++];
+    if (!dom) {
+        fprintf(stderr, "no domain specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    if (domain_qualifier_to_domid(dom, &domid, NULL) ||
+        !libxl_domid_to_name(&ctx, domid)) {
+        fprintf(stderr, "unknown domain \'%s\'\n", dom);
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_movedomain(&ctx, poolid, domid);
+}
diff -r 5393151a737b tools/libxl/xl_cmdtable.c
--- a/tools/libxl/xl_cmdtable.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl_cmdtable.c	Thu Sep 16 07:46:23 2010 +0200
@@ -338,6 +338,41 @@ struct cmd_spec cmd_table[] = {
       "destroy a domain's version 2 virtual network device",
       "<Domain> <DevId>",
     },
+    { "pool-create",
+      &main_poolcreate,
+      "Create a CPU pool based on a ConfigFile",
+      "[options] <ConfigFile> [vars]",
+      "-h, --help                   Print this help.\n"
+      "-f=FILE, --defconfig=FILE    Use the given configuration file.\n"
+      "-n, --dryrun                 Dry run - prints the resulting configuration."
+    },
+    { "pool-list",
+      &main_poollist,
+      "List CPU pools on host",
+      "[-l|--long] [-c|--cpus] [<CPU Pool>]",
+      "-l, --long                     Output all CPU pool details.\n"
+      "-c, --cpus                     Output list of CPUs used by a pool"
+    },
+    { "pool-destroy",
+      &main_pooldestroy,
+      "Deactivates a CPU pool",
+      "<CPU Pool>",
+    },
+    { "pool-cpu-add",
+      &main_poolcpuadd,
+      "Adds a CPU to a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-cpu-remove",
+      &main_poolcpuremove,
+      "Removes a CPU from a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-migrate",
+      &main_poolmigrate,
+      "Moves a domain into a CPU pool",
+      "<Domain> <CPU Pool>",
+    },
 };
 
 int cmdtable_len = sizeof(cmd_table)/sizeof(struct cmd_spec);
diff -r 5393151a737b tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Sep 16 07:46:23 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
   
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_cpu_id + 1;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
 
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
         return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_cpu_id + 1;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & ((uint64_t)1 << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                       &first_pool, &max_pools) )
         return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              (physinfo.max_cpu_id + 8) / 8);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  7:26 [Patch][resend] implementation of cpupool support in xl Juergen Gross
  2010-09-15  8:29 ` Ian Campbell
@ 2010-09-16  5:58 ` Vasiliy G Tolstov
  2010-09-16 13:01   ` Juergen Gross
  2010-09-16 10:45 ` Ian Campbell
  2 siblings, 1 reply; 17+ messages in thread
From: Vasiliy G Tolstov @ 2010-09-16  5:58 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

В Срд, 15/09/2010 в 09:26 +0200, Juergen Gross пишет:
> Hi,
> 
> attached patch implements cpupool support in xl. Rewrite of previous patch
> after rework of libxl.
> 
> 
> Juergen

Sorry for may be breaking thread, but what benefits of using cpu pool in
Xen? Can You provide some use cases for using cpu pools?

Thank You.

-- 
Vasiliy G Tolstov <v.tolstov@selfip.ru>
Selfip.Ru

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-15  7:26 [Patch][resend] implementation of cpupool support in xl Juergen Gross
  2010-09-15  8:29 ` Ian Campbell
  2010-09-16  5:58 ` Vasiliy G Tolstov
@ 2010-09-16 10:45 ` Ian Campbell
  2010-09-16 12:02   ` Juergen Gross
  2 siblings, 1 reply; 17+ messages in thread
From: Ian Campbell @ 2010-09-16 10:45 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
> Hi,
> 
> attached patch implements cpupool support in xl. Rewrite of previous patch
> after rework of libxl.

Thanks!

Please could you provide a changelog message? It seems you needed to
modify the libxc interface in order to support libxl, it would be worth
commenting on what/why you changed.

In several places I notice you calculate (X + 64) / 64. Normally I would
expect a (X + 63) / 64 pattern to round something up (for example in
other places you use + 7 / 8). Is this deliberate? Probably worth a
comment, or maybe wrap the concept into a function with an appropriate
name?

I think the units of libxl_cpumap.size might be worth a comment too,
especially since appears to differ from the units used in the libxc
interface. You can do this in the IDL with e.g.
-    ("size", uint32),
+    ("size", uint32, False, "number of <bananas> in map"),
     ("map",  Reference(uint64)),

I think <bananas>=="uint64_t sized words"? Perhaps size would be better
represented as the maximum CPU ID in the map? The fact that the map is
rounded up to the next 64 bit boundary isn't too interesting to callers,
is it?

I saw a "size * 8" which I think would better as "size *
sizeof(*....map)", although perhaps this usage reinforces the idea that
the size should be in CPUs not multiples of some word size? Especially
given that this is seen in the caller who I guess shouldn't really need
to know these details.

Similarly callers seem to be doing "map[i / 64] & (i %64)" type
arithmetic, I think providing a libxl_cpumap_cpu_present helper (and
_set/_clear if necessary) would help here.

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-16 10:45 ` Ian Campbell
@ 2010-09-16 12:02   ` Juergen Gross
  2010-09-16 17:22     ` Ian Jackson
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-16 12:02 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel

[-- Attachment #1: Type: text/plain, Size: 2243 bytes --]

On 09/16/10 12:45, Ian Campbell wrote:
> On Wed, 2010-09-15 at 08:26 +0100, Juergen Gross wrote:
>> Hi,
>>
>> attached patch implements cpupool support in xl. Rewrite of previous patch
>> after rework of libxl.
>
> Thanks!
>
> Please could you provide a changelog message? It seems you needed to
> modify the libxc interface in order to support libxl, it would be worth
> commenting on what/why you changed.

Okay, done.

> In several places I notice you calculate (X + 64) / 64. Normally I would
> expect a (X + 63) / 64 pattern to round something up (for example in
> other places you use + 7 / 8). Is this deliberate? Probably worth a
> comment, or maybe wrap the concept into a function with an appropriate
> name?

This was the conversion from max_cpu_id (which is 1 less than the max
number of cpus) to the number of map elements. I've moved this into its
own function for allocating a cpumap.map.

> I think the units of libxl_cpumap.size might be worth a comment too,
> especially since appears to differ from the units used in the libxc
> interface. You can do this in the IDL with e.g.
> -    ("size", uint32),
> +    ("size", uint32, False, "number of<bananas>  in map"),
>       ("map",  Reference(uint64)),

Okay.

> I think<bananas>=="uint64_t sized words"? Perhaps size would be better
> represented as the maximum CPU ID in the map? The fact that the map is
> rounded up to the next 64 bit boundary isn't too interesting to callers,
> is it?

I kept your proposal to keep the size in bytes (like in xc).

> I saw a "size * 8" which I think would better as "size *
> sizeof(*....map)",

The 8 is the number of bits in a byte (size being number of bytes).

> Similarly callers seem to be doing "map[i / 64]&  (i %64)" type
> arithmetic, I think providing a libxl_cpumap_cpu_present helper (and
> _set/_clear if necessary) would help here.

Done that, too.


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

[-- Attachment #2: xl-pools.patch --]
[-- Type: text/x-patch, Size: 46950 bytes --]

Signed-off-by: juergen.gross@ts.fujitsu.com

Support of cpu pools in xl:
  library functions
  xl pool-create
  xl pool-list
  xl pool-destroy
  xl pool-cpu-add
  xl pool-cpu-remove
  xl pool-migrate
To be able to support arbitrary numbers of physical cpus, it was necessary to
include the size of cpumaps in the xc-interfaces for cpu pools as well.
The changed interfaces were:
  definition of xc_cpupoolinfo_t
  xc_cpupool_getinfo()
  xc_cpupool_freeinfo()

diff -r 5393151a737b tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxc/xc_cpupool.c	Thu Sep 16 13:48:44 2010 +0200
@@ -34,6 +34,20 @@
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int cpumap_size = 0;
+    xc_physinfo_t physinfo;
+
+    if ( cpumap_size )
+        return cpumap_size;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+    return cpumap_size;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +78,56 @@
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t first_poolid,
-                       uint32_t n_max, 
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    cpumap_size = (local_size + 7) / 8;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
     DECLARE_SYSCTL;
+
+    local = malloc(cpusize);
+    if (local == NULL)
+        return -ENOMEM;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    free(local);
 
     if (err < 0)
         return err;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
     return 0;
 }
diff -r 5393151a737b tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxc/xenctrl.h	Thu Sep 16 13:48:44 2010 +0200
@@ -535,7 +535,8 @@
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +565,11 @@
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
  */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
  * return 0 on success, -1 on failure
  */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r 5393151a737b tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.c	Thu Sep 16 13:48:44 2010 +0200
@@ -594,9 +594,17 @@
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
+    int i, m;
+    xc_cpupoolinfo_t *info;
+    int size;
+    uint32_t poolid;
+    libxl_physinfo physinfo;
+
+    if (libxl_get_physinfo(ctx, &physinfo) != 0) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting phys info");
+        return NULL;
+    }
+    size = physinfo.max_cpu_id + 32;
 
     ptr = calloc(size, sizeof(libxl_poolinfo));
     if (!ptr) {
@@ -604,16 +612,23 @@
         return NULL;
     }
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
+    poolid = 0;
+    for (i = 0; i < size; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        ptr[i].poolid = info->cpupool_id;
+        ptr[i].sched_id = info->sched_id;
+        ptr[i].n_dom = info->n_dom;
+        if (libxl_cpumap_alloc(&ptr[i].cpumap, physinfo.max_cpu_id + 1))
+            break;
+        for (m = 0; m < ptr[i].cpumap.size / sizeof(*ptr[i].cpumap.map); m++)
+            ptr[i].cpumap.map[m] = info->cpumap[m];
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
 
@@ -2909,7 +2924,6 @@
     xc_domaininfo_t domaininfo;
     xc_vcpuinfo_t vcpuinfo;
     xc_physinfo_t physinfo = { 0 };
-    unsigned num_cpuwords;
 
     if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
@@ -2925,10 +2939,9 @@
         return NULL;
     }
 
-    num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
-        if (!ptr->cpumap) {
+        if (libxl_cpumap_alloc(&ptr->cpumap, *nrcpus)) {
+            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
             return NULL;
         }
         if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) {
@@ -2936,7 +2949,7 @@
             return NULL;
         }
         if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-            ptr->cpumap, ((*nrcpus) + 7) / 8) == -1) {
+            ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -3340,3 +3353,180 @@
     libxl__file_reference_unmap(f);
     free(f->path);
 }
+
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap)
+{
+    libxl_physinfo info;
+    int ret;
+
+    if (libxl_get_physinfo(ctx, &info) != 0)
+        return ERROR_FAIL;
+
+    ret = libxl_cpumap_alloc(cpumap, info.max_cpu_id + 1);
+    if (ret)
+        return ret;
+
+    if (xc_cpupool_freeinfo(ctx->xch, cpumap->map, cpumap->size)) {
+        free(cpumap->map);
+        cpumap->map = NULL;
+        return ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    int i;
+    xs_transaction_t t;
+    char *uuid_string;
+
+    uuid_string = libxl__uuid2string(&gc, *uuid);
+    if (!uuid_string)
+        return ERROR_NOMEM;
+
+    rc = xc_cpupool_create(ctx->xch, poolid, schedid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+           "Could not create cpupool");
+        return ERROR_FAIL;
+    }
+
+    for (i = 0; i < cpumap.size * 8; i++)
+        if (cpumap.map[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error moving cpu to cpupool");
+                return ERROR_FAIL;
+            }
+        }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_mkdir(ctx->xsh, t, libxl__sprintf(&gc, "/local/pool/%d", *poolid));
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/uuid", *poolid),
+                 uuid_string);
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "/local/pool/%d/name", *poolid),
+                 name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            return 0;
+    }
+}
+
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc, i;
+    xc_cpupoolinfo_t *info;
+    xs_transaction_t t;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL)
+        return ERROR_NOMEM;
+
+    rc = ERROR_INVAL;
+    if ((info->cpupool_id != poolid) || (info->n_dom))
+        goto out;
+
+    for (i = 0; i < info->cpumap_size; i++)
+        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
+            if (rc) {
+                LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+                    "Error removing cpu from cpupool");
+                rc = ERROR_FAIL;
+                goto out;
+            }
+        }
+
+    rc = xc_cpupool_destroy(ctx->xch, poolid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_rm(ctx->xsh, XBT_NULL, libxl__sprintf(&gc, "/local/pool/%d", poolid));
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    rc = 0;
+
+out:
+    free(info);
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving cpu to cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    int rc;
+
+    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error removing cpu from cpupool");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
+{
+    libxl__gc gc = LIBXL_INIT_GC(ctx);
+    int rc;
+    char *dom_path;
+    char *vm_path;
+    char *poolname;
+    xs_transaction_t t;
+
+    dom_path = libxl__xs_get_dompath(&gc, domid);
+    if (!dom_path) {
+        return ERROR_FAIL;
+    }
+
+    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
+    if (rc) {
+        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
+            "Error moving domain to cpupool");
+        return ERROR_FAIL;
+    }
+
+    poolname = libxl__poolid_to_name(&gc, poolid);
+    vm_path = libxl__xs_read(&gc, XBT_NULL, libxl__sprintf(&gc, "%s/vm", dom_path));
+    for (; vm_path;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        libxl__xs_write(&gc, t, libxl__sprintf(&gc, "%s/pool_name", vm_path), poolname);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    return 0;
+}
diff -r 5393151a737b tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.h	Thu Sep 16 13:48:44 2010 +0200
@@ -138,8 +138,6 @@
 typedef char **libxl_string_list;
 
 typedef char **libxl_key_value_list;
-
-typedef uint64_t *libxl_cpumap;
 
 typedef uint32_t libxl_hwcap[8];
 
@@ -471,6 +469,15 @@
 int libxl_device_net2_del(libxl_ctx *ctx, libxl_device_net2 *net2,
                           int wait);
 
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap);
+int libxl_create_cpupool(libxl_ctx *ctx, char *name, int schedid,
+                         libxl_cpumap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid);
+int libxl_destroy_cpupool(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+
 /* common paths */
 const char *libxl_sbindir_path(void);
 const char *libxl_bindir_path(void);
diff -r 5393151a737b tools/libxl/libxl.idl
--- a/tools/libxl/libxl.idl	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl.idl	Thu Sep 16 13:48:44 2010 +0200
@@ -14,8 +14,6 @@
 
 libxl_string_list = Builtin("string_list", destructor_fn="libxl_string_list_destroy", passby=PASS_BY_REFERENCE)
 libxl_key_value_list = Builtin("key_value_list", destructor_fn="libxl_key_value_list_destroy", passby=PASS_BY_REFERENCE)
-
-libxl_cpumap = Builtin("cpumap", destructor_fn="free")
 
 libxl_hwcap = Builtin("hwcap")
 
@@ -42,9 +40,17 @@
     ("vcpu_online", uint32),
     ], destructor_fn=None)
 
+libxl_cpumap = Struct("cpumap", [
+    ("size", uint32, False, "number of bytes in map"),
+    ("map",  Reference(uint64)),
+    ])
+                      
 libxl_poolinfo = Struct("poolinfo", [
-    ("poolid", uint32)
-    ], destructor_fn=None)
+    ("poolid",      uint32),
+    ("sched_id",    uint32),
+    ("n_dom",       uint32),
+    ("cpumap",      libxl_cpumap)
+    ], destructor_fn="libxl_poolinfo_destroy")
 
 libxl_vminfo = Struct("vminfo", [
     ("uuid", libxl_uuid),
diff -r 5393151a737b tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl_utils.c	Thu Sep 16 13:48:44 2010 +0200
@@ -31,6 +31,17 @@
 #include "libxl_utils.h"
 #include "libxl_internal.h"
 
+struct schedid_name {
+    char *name;
+    int id;
+};
+
+static struct schedid_name schedid_name[] = {
+    { "credit", XEN_SCHEDULER_CREDIT },
+    { "sedf", XEN_SCHEDULER_SEDF },
+    { "credit2", XEN_SCHEDULER_CREDIT2 },
+    { NULL, -1 }
+};
 
 unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus)
 {
@@ -124,19 +135,41 @@
         return ERROR_NOMEM;
 
     for (i = 0; i < nb_pools; i++) {
-        poolname = libxl_poolid_to_name(ctx, poolinfo[i].poolid);
-        if (!poolname)
-            continue;
-        if (strcmp(poolname, name) == 0) {
-            *poolid = poolinfo[i].poolid;
-            ret = 0;
+        if (ret && ((poolname = libxl_poolid_to_name(ctx,
+            poolinfo[i].poolid)) != NULL)) {
+            if (strcmp(poolname, name) == 0) {
+                *poolid = poolinfo[i].poolid;
+                ret = 0;
+                free(poolname);
+            }
             free(poolname);
-            break;
         }
-        free(poolname);
+        libxl_poolinfo_destroy(poolinfo + i);
     }
     free(poolinfo);
     return ret;
+}
+
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (strcmp(name, schedid_name[i].name) == 0)
+            return schedid_name[i].id;
+
+    return -1;
+}
+
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid)
+{
+    int i;
+
+    for (i = 0; schedid_name[i].name != NULL; i++)
+        if (schedid_name[i].id == schedid)
+            return schedid_name[i].name;
+
+    return "unknown";
 }
 
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid)
@@ -675,3 +708,36 @@
     libxl__free_all(&gc);
     return rc;
 }
+
+int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus)
+{
+    int elems;
+
+    elems = (max_cpus + 63) / 64;
+    cpumap->map = calloc(elems, sizeof(*cpumap->map));
+    if (!cpumap->map)
+        return ERROR_NOMEM;
+    cpumap->size = elems * 8;     /* size in bytes */
+    return 0;
+}
+
+int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+{
+    if (cpu >= cpumap->size * 8)
+        return 0;
+    return (cpumap->map[cpu / 64] & (1L << (cpu & 63))) ? 1 : 0;
+}
+
+void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
+{
+    if (cpu >= cpumap->size * 8)
+        return;
+    cpumap->map[cpu / 64] |= 1L << (cpu & 63);
+}
+
+void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
+{
+    if (cpu >= cpumap->size * 8)
+        return;
+    cpumap->map[cpu / 64] &= ~(1L << (cpu & 63));
+}
diff -r 5393151a737b tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxl_utils.h	Thu Sep 16 13:48:44 2010 +0200
@@ -23,6 +23,8 @@
 char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
 int libxl_name_to_poolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
 char *libxl_poolid_to_name(libxl_ctx *ctx, uint32_t poolid);
+int libxl_name_to_schedid(libxl_ctx *ctx, const char *name);
+char *libxl_schedid_to_name(libxl_ctx *ctx, int schedid);
 int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
 int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
 int libxl_create_logfile(libxl_ctx *ctx, char *name, char **full_name);
@@ -74,5 +76,9 @@
  * return -1 if there are an error */
 int libxl_check_device_model_version(libxl_ctx *ctx, char *path);
 
+int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus);
+int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
+void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
+void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
 #endif
 
diff -r 5393151a737b tools/libxl/libxltypes.py
--- a/tools/libxl/libxltypes.py	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxltypes.py	Thu Sep 16 13:48:44 2010 +0200
@@ -119,7 +119,10 @@
         kwargs.setdefault('passby', PASS_BY_VALUE)
         
         kwargs.setdefault('namespace', ty.namespace)
-        typename = ty.typename[len(kwargs['namespace']):]
+
+        typename = ty.typename
+        if ty.namespace:
+            typename = typename[len(kwargs['namespace']):]
         Type.__init__(self, typename + " *", **kwargs)
 
 #
diff -r 5393151a737b tools/libxl/libxlu_cfg_l.c
--- a/tools/libxl/libxlu_cfg_l.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.c	Thu Sep 16 13:48:44 2010 +0200
@@ -54,6 +54,7 @@
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -83,8 +84,6 @@
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -159,15 +158,7 @@
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -496,7 +487,7 @@
 void xlu__cfg_yyset_column(int  column_no, yyscan_t yyscanner);
 
 
-#line 500 "libxlu_cfg_l.c"
+#line 491 "libxlu_cfg_l.c"
 
 #define INITIAL 0
 #define lexerr 1
@@ -632,12 +623,7 @@
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -645,7 +631,7 @@
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
+#define ECHO fwrite( yytext, yyleng, 1, yyout )
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -656,7 +642,7 @@
 	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
 		{ \
 		int c = '*'; \
-		size_t n; \
+		int n; \
 		for ( n = 0; n < max_size && \
 			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
 			buf[n] = (char) c; \
@@ -744,7 +730,7 @@
 #line 37 "libxlu_cfg_l.l"
 
 
-#line 748 "libxlu_cfg_l.c"
+#line 734 "libxlu_cfg_l.c"
 
     yylval = yylval_param;
 
@@ -944,7 +930,7 @@
 #line 82 "libxlu_cfg_l.l"
 YY_FATAL_ERROR( "flex scanner jammed" );
 	YY_BREAK
-#line 948 "libxlu_cfg_l.c"
+#line 934 "libxlu_cfg_l.c"
 case YY_STATE_EOF(INITIAL):
 case YY_STATE_EOF(lexerr):
 	yyterminate();
@@ -1687,8 +1673,8 @@
 
 /** Setup the input buffer state to scan the given bytes. The next call to xlu__cfg_yylex() will
  * scan from a @e copy of @a bytes.
- * @param yybytes the byte buffer to scan
- * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
  * @param yyscanner The scanner object.
  * @return the newly allocated buffer state object.
  */
diff -r 5393151a737b tools/libxl/libxlu_cfg_l.h
--- a/tools/libxl/libxlu_cfg_l.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/libxlu_cfg_l.h	Thu Sep 16 13:48:44 2010 +0200
@@ -58,6 +58,7 @@
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -87,8 +88,6 @@
 #ifndef UINT32_MAX
 #define UINT32_MAX             (4294967295U)
 #endif
-
-#endif /* ! C99 */
 
 #endif /* ! FLEXINT_H */
 
@@ -132,15 +131,7 @@
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k.
- * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
- * Ditto for the __ia64__ case accordingly.
- */
-#define YY_BUF_SIZE 32768
-#else
 #define YY_BUF_SIZE 16384
-#endif /* __ia64__ */
 #endif
 
 #ifndef YY_TYPEDEF_YY_BUFFER_STATE
@@ -310,12 +301,7 @@
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
-#ifdef __ia64__
-/* On IA-64, the buffer size is 16k, not 8k */
-#define YY_READ_BUF_SIZE 16384
-#else
 #define YY_READ_BUF_SIZE 8192
-#endif /* __ia64__ */
 #endif
 
 /* Number of entries by which start-condition stack grows. */
@@ -352,6 +338,6 @@
 
 #line 82 "libxlu_cfg_l.l"
 
-#line 356 "libxlu_cfg_l.h"
+#line 342 "libxlu_cfg_l.h"
 #undef xlu__cfg_yyIN_HEADER
 #endif /* xlu__cfg_yyHEADER_H */
diff -r 5393151a737b tools/libxl/xl.h
--- a/tools/libxl/xl.h	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl.h	Thu Sep 16 13:48:44 2010 +0200
@@ -79,6 +79,12 @@
 int main_network2attach(int argc, char **argv);
 int main_network2list(int argc, char **argv);
 int main_network2detach(int argc, char **argv);
+int main_poolcreate(int argc, char **argv);
+int main_poollist(int argc, char **argv);
+int main_pooldestroy(int argc, char **argv);
+int main_poolcpuadd(int argc, char **argv);
+int main_poolcpuremove(int argc, char **argv);
+int main_poolmigrate(int argc, char **argv);
 
 void help(char *command);
 
diff -r 5393151a737b tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Thu Sep 16 13:48:44 2010 +0200
@@ -420,7 +420,7 @@
     printf("\t(ssidref %d)\n", c_info->ssidref);
     printf("\t(name %s)\n", c_info->name);
     printf("\t(uuid " LIBXL_UUID_FMT ")\n", LIBXL_UUID_BYTES(c_info->uuid));
-    printf("\t(cpupool %s (%d))\n", c_info->poolname, c_info->poolid);
+    printf("\t(cpupool %s)\n", c_info->poolname);
     if (c_info->xsdata)
         printf("\t(xsdata contains data)\n");
     else
@@ -3286,7 +3286,7 @@
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
     pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
-    for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+    for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
@@ -3301,7 +3301,7 @@
     if (!nr_cpus) {
         printf("any cpu\n");
     } else {
-        for (cpumap = vcpuinfo->cpumap; nr_cpus; ++cpumap) {
+        for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
             pcpumap = *cpumap;
             for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
                 ;
@@ -3572,10 +3572,7 @@
     printf("xen_minor              : %d\n", info->xen_version_minor);
     printf("xen_extra              : %s\n", info->xen_version_extra);
     printf("xen_caps               : %s\n", info->capabilities);
-    printf("xen_scheduler          : %s\n",
-        sched_id == XEN_SCHEDULER_SEDF ? "sedf" :
-        sched_id == XEN_SCHEDULER_CREDIT ? "credit" :
-        sched_id == XEN_SCHEDULER_CREDIT2 ? "credit2" : "unknown");
+    printf("xen_scheduler          : %s\n", libxl_schedid_to_name(&ctx, sched_id));
     printf("xen_pagesize           : %lu\n", info->pagesize);
     printf("platform_params        : virt_start=0x%lx\n", info->virt_start);
     printf("xen_changeset          : %s\n", info->changeset);
@@ -3606,6 +3603,8 @@
     libxl_physinfo info;
     const libxl_version_info *vinfo;
     unsigned int i;
+    libxl_cpumap cpumap;
+    int n = 0;
 
     if (libxl_get_physinfo(&ctx, &info) != 0) {
         fprintf(stderr, "libxl_physinfo failed.\n");
@@ -3632,6 +3631,13 @@
         printf("total_memory           : %"PRIu64"\n", info.total_pages / i);
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
+    if (!libxl_get_freecpus(&ctx, &cpumap)) {
+        for (i = 0; i < cpumap.size * 8; i++)
+            if (libxl_cpumap_test(&cpumap, i))
+                n++;
+        printf("free_cpus              : %d\n", n);
+        free(cpumap.map);
+    }
 
     return;
 }
@@ -5047,3 +5053,444 @@
     printf("%d\n", mb);
     return 0;
 }
+
+int main_poolcreate(int argc, char **argv)
+{
+    char *filename = NULL;
+    char *p, extra_config[1024];
+    int dryrun = 0;
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"defconfig", 1, 0, 'f'},
+        {"dryrun", 0, 0, 'n'},
+        {0, 0, 0, 0}
+    };
+    int ret;
+    void *config_data = 0;
+    int config_len = 0;
+    XLU_Config *config;
+    const char *buf;
+    char *name, *sched;
+    uint32_t poolid;
+    int schedid = -1;
+    XLU_ConfigList *cpus;
+    int n_cpus, i, n;
+    libxl_cpumap freemap;
+    libxl_cpumap cpumap;
+    libxl_uuid uuid;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hnf:", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'f':
+            filename = optarg;
+            break;
+        case 'h':
+            help("pool-create");
+            return 0;
+        case 'n':
+            dryrun = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    memset(extra_config, 0, sizeof(extra_config));
+    while (optind < argc) {
+        if ((p = strchr(argv[optind], '='))) {
+            if (strlen(extra_config) + 1 < sizeof(extra_config)) {
+                if (strlen(extra_config))
+                    strcat(extra_config, "\n");
+                strcat(extra_config, argv[optind]);
+            }
+        } else if (!filename) {
+            filename = argv[optind];
+        } else {
+            help("pool-create");
+            return -ERROR_FAIL;
+        }
+        optind++;
+    }
+
+    if (!filename) {
+        help("pool-create");
+        return -ERROR_FAIL;
+    }
+
+    if (libxl_read_file_contents(&ctx, filename, &config_data, &config_len)) {
+        fprintf(stderr, "Failed to read config file: %s: %s\n",
+                filename, strerror(errno));
+        return -ERROR_FAIL;
+    }
+    if (strlen(extra_config)) {
+        if (config_len > INT_MAX - (strlen(extra_config) + 2)) {
+            fprintf(stderr, "Failed to attach extra configration\n");
+            return -ERROR_FAIL;
+        }
+        config_data = realloc(config_data,
+                              config_len + strlen(extra_config) + 2);
+        if (!config_data) {
+            fprintf(stderr, "Failed to realloc config_data\n");
+            return -ERROR_FAIL;
+        }
+        strcat(config_data, "\n");
+        strcat(config_data, extra_config);
+        strcat(config_data, "\n");
+        config_len += (strlen(extra_config) + 2);
+    }
+
+    config = xlu_cfg_init(stderr, filename);
+    if (!config) {
+        fprintf(stderr, "Failed to allocate for configuration\n");
+        return -ERROR_FAIL;
+    }
+
+    ret = xlu_cfg_readdata(config, config_data, config_len);
+    if (ret) {
+        fprintf(stderr, "Failed to parse config file: %s\n", strerror(ret));
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "name", &buf))
+        name = strdup(buf);
+    else
+        name = basename(filename);
+    if (!libxl_name_to_poolid(&ctx, name, &poolid)) {
+        fprintf(stderr, "Pool name \"%s\" already exists\n", name);
+        return -ERROR_FAIL;
+    }
+
+    if (!xlu_cfg_get_string (config, "sched", &buf)) {
+        if ((schedid = libxl_name_to_schedid(&ctx, buf)) < 0) {
+            fprintf(stderr, "Unknown scheduler\n");
+            return -ERROR_FAIL;
+        }
+    } else {
+        if ((schedid = libxl_get_sched_id(&ctx)) < 0) {
+            fprintf(stderr, "get_sched_id sysctl failed.\n");
+            return -ERROR_FAIL;
+        }
+    }
+    sched = libxl_schedid_to_name(&ctx, schedid);
+
+    if (libxl_get_freecpus(&ctx, &freemap)) {
+        fprintf(stderr, "libxl_get_freecpus failed\n");
+        return -ERROR_FAIL;
+    }
+    if (libxl_cpumap_alloc(&cpumap, freemap.size * 8)) {
+        fprintf(stderr, "Failed to allocate cpumap\n");
+        return -ERROR_FAIL;
+    }
+    if (!xlu_cfg_get_list(config, "cpus", &cpus, 0)) {
+        n_cpus = 0;
+        while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
+            i = atoi(buf);
+            if ((i < 0) || (i >= freemap.size * 8) ||
+                !libxl_cpumap_test(&freemap, i)) {
+                fprintf(stderr, "cpu %d illegal or not free\n", i);
+                return -ERROR_FAIL;
+            }
+            libxl_cpumap_set(&cpumap, i);
+            n_cpus++;
+        }
+    } else {
+        n_cpus = 1;
+        n = 0;
+        for (i = 0; i < freemap.size; i++)
+            if (libxl_cpumap_test(&freemap, i)) {
+                n++;
+                libxl_cpumap_set(&cpumap, i);
+                break;
+            }
+        if (n != n_cpus) {
+            fprintf(stderr, "no free cpu found\n");
+            return -ERROR_FAIL;
+        }
+    }
+
+    libxl_uuid_generate(&uuid);
+
+    printf("Using config file \"%s\"\n", filename);
+    printf("pool name:      %s\n", name);
+    printf("scheduler:      %s\n", sched);
+    printf("number of cpus: %d\n", n_cpus);
+
+    if (dryrun)
+        return 0;
+
+    poolid = 0;
+    if (libxl_create_cpupool(&ctx, name, schedid, cpumap, &uuid, &poolid)) {
+        fprintf(stderr, "error on creating cpupool\n");
+        return -ERROR_FAIL;
+    }
+
+    return 0;
+}
+
+int main_poollist(int argc, char **argv)
+{
+    int opt;
+    int option_index = 0;
+    static struct option long_options[] = {
+        {"help", 0, 0, 'h'},
+        {"long", 0, 0, 'l'},
+        {"cpus", 0, 0, 'c'},
+        {0, 0, 0, 0}
+    };
+    int opt_long = 0;
+    int opt_cpus = 0;
+    char *pool = NULL;
+    libxl_poolinfo *poolinfo;
+    int n_pools, p, c, n;
+    uint32_t poolid;
+    char *name;
+    int ret = 0;
+
+    while (1) {
+        opt = getopt_long(argc, argv, "hlc", long_options, &option_index);
+        if (opt == -1)
+            break;
+
+        switch (opt) {
+        case 'h':
+            help("pool-list");
+            return 0;
+        case 'l':
+            opt_long = 1;
+            break;
+        case 'c':
+            opt_cpus = 1;
+            break;
+        default:
+            fprintf(stderr, "option not supported\n");
+            break;
+        }
+    }
+
+    if ((optind + 1) < argc) {
+        help("pool-list");
+        return -ERROR_FAIL;
+    }
+    if (optind < argc) {
+        pool = argv[optind];
+        if (libxl_name_to_poolid(&ctx, pool, &poolid)) {
+            fprintf(stderr, "Pool \'%s\' does not exist\n", pool);
+            return -ERROR_FAIL;
+        }
+    }
+
+    poolinfo = libxl_list_pool(&ctx, &n_pools);
+    if (!poolinfo) {
+        fprintf(stderr, "error getting cpupool info\n");
+        return -ERROR_NOMEM;
+    }
+
+    if (!opt_long) {
+        printf("%-19s", "Name");
+        if (opt_cpus)
+            printf("CPU list\n");
+        else
+            printf("CPUs   Sched     Active   Domain count\n");
+    }
+
+    for (p = 0; p < n_pools; p++) {
+        if (!ret && (!pool || (poolinfo[p].poolid != poolid))) {
+            name = libxl_poolid_to_name(&ctx, poolinfo[p].poolid);
+            if (!name) {
+                fprintf(stderr, "error getting cpupool info\n");
+                ret = -ERROR_NOMEM;
+            }
+            else if (opt_long) {
+                ret = -ERROR_NI;
+            } else {
+                printf("%-19s", name);
+                n = 0;
+                for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
+                    if (poolinfo[p].cpumap.map[c / 64] & (1L << (c % 64))) {
+                        if (n && opt_cpus) printf(",");
+                        if (opt_cpus) printf("%d", c);
+                        n++;
+                    }
+                if (!opt_cpus) {
+                    printf("%3d %9s       y       %4d", n,
+                           libxl_schedid_to_name(&ctx, poolinfo[p].sched_id),
+                           poolinfo[p].n_dom);
+                }
+                printf("\n");
+            }
+        }
+        libxl_poolinfo_destroy(poolinfo + p);
+    }
+
+    return ret;
+}
+
+int main_pooldestroy(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-destroy");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-destroy");
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_destroy_cpupool(&ctx, poolid);
+}
+
+int main_poolcpuadd(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-add");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-add");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuadd(&ctx, poolid, cpu);
+}
+
+int main_poolcpuremove(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    int cpu;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-cpu-remove");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+
+    if (!argv[optind]) {
+        fprintf(stderr, "no cpu specified\n");
+        help("pool-cpu-remove");
+        return -ERROR_FAIL;
+    }
+    cpu = atoi(argv[optind]);
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_cpuremove(&ctx, poolid, cpu);
+}
+
+int main_poolmigrate(int argc, char **argv)
+{
+    int opt;
+    char *pool;
+    uint32_t poolid;
+    char *dom;
+    uint32_t domid;
+
+    while ((opt = getopt(argc, argv, "h")) != -1) {
+        switch (opt) {
+        case 'h':
+            help("pool-migrate");
+            return 0;
+        default:
+            fprintf(stderr, "option `%c' not supported.\n", opt);
+            break;
+        }
+    }
+
+    dom = argv[optind++];
+    if (!dom) {
+       fprintf(stderr, "no domain specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    pool = argv[optind++];
+    if (!pool) {
+        fprintf(stderr, "no cpupool specified\n");
+        help("pool-migrate");
+        return -ERROR_FAIL;
+    }
+
+    if (domain_qualifier_to_domid(dom, &domid, NULL) ||
+        !libxl_domid_to_name(&ctx, domid)) {
+        fprintf(stderr, "unknown domain \'%s\'\n", dom);
+        return -ERROR_FAIL;
+    }
+
+    if (pool_qualifier_to_poolid(pool, &poolid, NULL) ||
+        !libxl_poolid_to_name(&ctx, poolid)) {
+        fprintf(stderr, "unknown pool \'%s\'\n", pool);
+        return -ERROR_FAIL;
+    }
+
+    return -libxl_cpupool_movedomain(&ctx, poolid, domid);
+}
diff -r 5393151a737b tools/libxl/xl_cmdtable.c
--- a/tools/libxl/xl_cmdtable.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/libxl/xl_cmdtable.c	Thu Sep 16 13:48:44 2010 +0200
@@ -338,6 +338,41 @@
       "destroy a domain's version 2 virtual network device",
       "<Domain> <DevId>",
     },
+    { "pool-create",
+      &main_poolcreate,
+      "Create a CPU pool based an ConfigFile",
+      "[options] <ConfigFile> [vars]",
+      "-h, --help                   Print this help.\n"
+      "-f=FILE, --defconfig=FILE    Use the given configuration file.\n"
+      "-n, --dryrun                 Dry run - prints the resulting configuration."
+    },
+    { "pool-list",
+      &main_poollist,
+      "List CPU pools on host",
+      "[-l|--long] [-c|--cpus] [<CPU Pool>]",
+      "-l, --long                     Output all CPU pool details.\n"
+      "-c, --cpus                     Output list of CPUs used by a pool"
+    },
+    { "pool-destroy",
+      &main_pooldestroy,
+      "Deactivates a CPU pool",
+      "<CPU Pool>",
+    },
+    { "pool-cpu-add",
+      &main_poolcpuadd,
+      "Adds a CPU to a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-cpu-remove",
+      &main_poolcpuremove,
+      "Removes a CPU from a CPU pool",
+      "<CPU Pool> <CPU nr>",
+    },
+    { "pool-migrate",
+      &main_poolmigrate,
+      "Moves a domain into a CPU pool",
+      "<Domain> <CPU Pool>",
+    },
 };
 
 int cmdtable_len = sizeof(cmd_table)/sizeof(struct cmd_spec);
diff -r 5393151a737b tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Tue Sep 14 18:26:10 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Sep 16 13:48:44 2010 +0200
@@ -241,7 +241,7 @@
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
   
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_cpu_id + 1;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@
 
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
         return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_cpu_id + 1;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1906,23 @@
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1L << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@
                                       &first_pool, &max_pools) )
         return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2066,28 @@
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = -ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              (physinfo.max_cpu_id + 8) / 8);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-16  5:58 ` Vasiliy G Tolstov
@ 2010-09-16 13:01   ` Juergen Gross
  2010-09-16 13:02     ` Vasiliy G Tolstov
  0 siblings, 1 reply; 17+ messages in thread
From: Juergen Gross @ 2010-09-16 13:01 UTC (permalink / raw)
  To: v.tolstov; +Cc: xen-devel

On 09/16/10 07:58, Vasiliy G Tolstov wrote:
> Sorry for may be breaking thread, but what benefits of using cpu pool in
> Xen? Can You provide some use cases for using cpu pools?

Without cpu pools the whole Xen server is running one scheduler which is
scheduling all domains on all cpus (possibly with some restrictions due to
cpu pinning). With cpu pools each pool is running its own scheduler
responsible only for some cpus and some domains. This may be important if
you want to:
- isolate some domains from others regarding cpu usage
- restrict a group of domains to a subset of cpus (e.g. due to license
   restrictions), while scheduling parameters should still be working (this was
   our main problem without cpu pools: scheduling weights and cpu pinning are
   not working together very well)
- use different scheduling strategies for different domains (e.g. credit
   scheduler for some domains and credit2 or sedf for others)

For an extended discussion on cpu pools please see:
http://lists.xensource.com/archives/html/xen-devel/2009-07/msg01116.html


HTH Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-16 13:01   ` Juergen Gross
@ 2010-09-16 13:02     ` Vasiliy G Tolstov
  0 siblings, 0 replies; 17+ messages in thread
From: Vasiliy G Tolstov @ 2010-09-16 13:02 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel

В Чтв, 16/09/2010 в 15:01 +0200, Juergen Gross пишет:
> On 09/16/10 07:58, Vasiliy G Tolstov wrote:
> > Sorry for may be breaking thread, but what benefits of using cpu pool in
> > Xen? Can You provide some use cases for using cpu pools?
> 
> Without cpu pools the whole Xen server is running one scheduler which is
> scheduling all domains on all cpus (possibly with some restrictions due to
> cpu pinning). With cpu pools each pool is running it's own scheduler
> responsible only for some cpus and some domains. This may be important if
> you want to:
> - isolate some domains from others regarding cpu usage
> - restrict a group of domains to a subset of cpus (e.g. due to license
>    restrictions), while scheduling parameters should still be working (this was
>    our main problem without cpu pools: scheduling weights and cpu pinning are
>    not working together very well)
> - use different scheduling strategies for different domains (e.g. credit
>    scheduler for some domains and credit2 or sedf for others)
> 
> For an extended discussion on cpu pools please see:
> http://lists.xensource.com/archives/html/xen-devel/2009-07/msg01116.html
> 
> 
> HTH Juergen
> 

Thank you very much!

-- 
Vasiliy G Tolstov <v.tolstov@selfip.ru>
Selfip.Ru

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-16 12:02   ` Juergen Gross
@ 2010-09-16 17:22     ` Ian Jackson
  0 siblings, 0 replies; 17+ messages in thread
From: Ian Jackson @ 2010-09-16 17:22 UTC (permalink / raw)
  To: Juergen Gross; +Cc: xen-devel, Ian Campbell

Juergen Gross writes ("Re: [Xen-devel] [Patch][resend] implementation of cpupool support in xl"):
> To be able to support arbitrary numbers of physical cpus it was necessary to
> include the size of cpumaps in the xc-interfaces for cpu pools as well
> These were:
>   definition of xc_cpupoolinfo_t
>   xc_cpupool_getinfo()
>   xc_cpupool_freeinfo()

I think it would make sense to separate out this part of the patch
(which also touches tools/python).  Can the consequence in libxl of
this change to the xc_ functions be decoupled from the extra
functionality of the new libxl and xl features ?

I think ideally this would be three patches:
  libxc: cpupools: support arbitrary number of physical cpus
  libxl: support more cpupool operations
  xl: support cpupools
although splitting the 2nd and 3rd apart is trivial so I could do it
myself :-).

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Patch][resend] implementation of cpupool support in xl
  2010-09-16  5:48                 ` Juergen Gross
@ 2010-09-16 17:24                   ` Ian Jackson
  0 siblings, 0 replies; 17+ messages in thread
From: Ian Jackson @ 2010-09-16 17:24 UTC (permalink / raw)
  To: Juergen Gross; +Cc: Ian Campbell, xen-devel

Juergen Gross writes ("Re: [Xen-devel] [Patch][resend] implementation of cpupool support in xl"):
> Okay, new patch attached.

Thanks.  See the comments in my other mail, just sent.  Also can you
please refresh your patch(es) against xen-unstable staging ?  There
are a few conflicts when I test-apply it, right now.

Ian.

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2010-09-16 17:24 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-09-15  7:26 [Patch][resend] implementation of cpupool support in xl Juergen Gross
2010-09-15  8:29 ` Ian Campbell
2010-09-15  8:45   ` Juergen Gross
2010-09-15  9:16     ` Ian Campbell
2010-09-15  9:23       ` Juergen Gross
2010-09-15  9:37         ` Ian Campbell
2010-09-15 10:22           ` Ian Campbell
2010-09-15 11:28             ` Juergen Gross
2010-09-15 15:35               ` Ian Campbell
2010-09-16  5:48                 ` Juergen Gross
2010-09-16 17:24                   ` Ian Jackson
2010-09-16  5:58 ` Vasiliy G Tolstov
2010-09-16 13:01   ` Juergen Gross
2010-09-16 13:02     ` Vasiliy G Tolstov
2010-09-16 10:45 ` Ian Campbell
2010-09-16 12:02   ` Juergen Gross
2010-09-16 17:22     ` Ian Jackson

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.