* [Patch] update cpumask handling for cpu pools in libxc and python
@ 2010-09-17 5:51 Juergen Gross
2010-09-17 9:00 ` Ian Campbell
0 siblings, 1 reply; 10+ messages in thread
From: Juergen Gross @ 2010-09-17 5:51 UTC (permalink / raw)
To: xen-devel, Ian Campbell
[-- Attachment #1: Type: text/plain, Size: 496 bytes --]
Hi,
attached patch updates the cpumask handling for cpu pools in libxc and python
to support arbitrary numbers of cpus.
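In short, the interface changes to (prototypes as they appear in the patch
below):

    xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, uint32_t poolid);
    int xc_cpupool_freeinfo(xc_interface *xch, uint64_t *cpumap, int cpusize);

xc_cpupool_getinfo() now returns a malloc'ed info block for a single pool
(NULL on error) instead of filling a caller-supplied array, and
xc_cpupool_freeinfo() takes the size of the cpumap array in bytes.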
Juergen
--
Juergen Gross Principal Developer Operating Systems
TSP ES&S SWE OS6 Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28 Internet: ts.fujitsu.com
D-80807 Muenchen Company details: ts.fujitsu.com/imprint.html
[-- Attachment #2: cpupool-tools-cpumask.patch --]
[-- Type: text/x-patch, Size: 11558 bytes --]
Signed-off-by: juergen.gross@ts.fujitsu.com
To be able to support arbitrary numbers of physical cpus it was necessary to
include the size of cpumaps in the xc-interfaces for cpu pools.
These were:
definition of xc_cpupoolinfo_t
xc_cpupool_getinfo()
xc_cpupool_freeinfo()
diff -r d978675f3d53 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xc_cpupool.c Fri Sep 17 07:42:30 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
return ret;
}
+static int get_cpumap_size(xc_interface *xch)
+{
+ static int cpumap_size = 0;
+ xc_physinfo_t physinfo;
+
+ if ( cpumap_size )
+ return cpumap_size;
+
+ if ( !xc_physinfo(xch, &physinfo) )
+ cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+ return cpumap_size;
+}
+
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id)
@@ -64,50 +78,56 @@ int xc_cpupool_destroy(xc_interface *xch
return do_sysctl_save(xch, &sysctl);
}
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid)
{
int err = 0;
- int p;
- uint32_t poolid = first_poolid;
- uint8_t local[sizeof (info->cpumap)];
+ xc_cpupoolinfo_t *info;
+ uint8_t *local;
+ int local_size;
+ int cpumap_size;
+ int size;
DECLARE_SYSCTL;
- memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+ local_size = get_cpumap_size(xch);
+ cpumap_size = (local_size + 7) / 8;
+ size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+ info = malloc(size);
+ if ( !info )
+ return NULL;
- for (p = 0; p < n_max; p++)
+ memset(info, 0, size);
+ info->cpumap_size = local_size * 8;
+ info->cpumap = (uint64_t *)(info + 1);
+ local = (uint8_t *)(info->cpumap + cpumap_size);
+
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+ if ( (err = lock_pages(local, local_size)) != 0 )
{
- sysctl.cmd = XEN_SYSCTL_cpupool_op;
- sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
- sysctl.u.cpupool_op.cpupool_id = poolid;
- set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+ PERROR("Could not lock memory for Xen hypercall");
+ free(info);
+ return NULL;
+ }
+ err = do_sysctl_save(xch, &sysctl);
+ unlock_pages(local, local_size);
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- break;
- }
- err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
-
- if ( err < 0 )
- break;
-
- info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
- info->sched_id = sysctl.u.cpupool_op.sched_id;
- info->n_dom = sysctl.u.cpupool_op.n_dom;
- bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
- poolid = sysctl.u.cpupool_op.cpupool_id + 1;
- info++;
+ if ( err < 0 )
+ {
+ free(info);
+ return NULL;
}
- if ( p == 0 )
- return err;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(info->cpumap, local, local_size * 8);
- return p;
+ return info;
}
int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@ int xc_cpupool_movedomain(xc_interface *
}
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap)
+ uint64_t *cpumap,
+ int cpusize)
{
int err;
- uint8_t local[sizeof (*cpumap)];
+ uint8_t *local;
DECLARE_SYSCTL;
+
+ local = malloc(cpusize);
+ if (local == NULL)
+ return -ENOMEM;
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(local, cpusize)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
+ free(local);
return err;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(local, cpusize);
+ bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+ free(local);
if (err < 0)
return err;
- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
return 0;
}
diff -r d978675f3d53 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xenctrl.h Fri Sep 17 07:42:30 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
uint32_t cpupool_id;
uint32_t sched_id;
uint32_t n_dom;
- uint64_t cpumap;
+ uint32_t cpumap_size; /* max number of cpus in map */
+ uint64_t *cpumap;
} xc_cpupoolinfo_t;
/**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
* Get cpupool info. Returns info for up to the specified number of cpupools
* starting at the given id.
* @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
*/
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid);
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
* return 0 on success, -1 on failure
*/
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap);
+ uint64_t *cpumap,
+ int cpusize);
/*
diff -r d978675f3d53 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Sep 17 07:42:30 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
if ( xc_physinfo(self->xc_handle, &info) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = info.nr_cpus;
+ nr_cpus = info.max_cpu_id + 1;
size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = pinfo.nr_cpus;
+ nr_cpus = pinfo.max_cpu_id + 1;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
{
PyObject *cpulist = NULL;
- uint32_t i;
+ int i;
cpulist = PyList_New(0);
- for ( i = 0; cpumap != 0; i++ )
+ for ( i = 0; i < cpusize; i++ )
{
- if ( cpumap & 1 )
+ if ( *cpumap & (1L << (i % 64)) )
{
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap >>= 1;
+ if ( (i % 64) == 63 )
+ cpumap++;
}
return cpulist;
}
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
PyObject *list, *info_dict;
uint32_t first_pool = 0;
- int max_pools = 1024, nr_pools, i;
+ int max_pools = 1024, i;
xc_cpupoolinfo_t *info;
static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
&first_pool, &max_pools) )
return NULL;
- info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
- if (info == NULL)
- return PyErr_NoMemory();
-
- nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
- if (nr_pools < 0)
+ list = PyList_New(0);
+ for (i = 0; i < max_pools; i++)
{
- free(info);
- return pyxc_error_to_exception(self->xc_handle);
- }
-
- list = PyList_New(nr_pools);
- for ( i = 0 ; i < nr_pools; i++ )
- {
+ info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+ if (info == NULL)
+ break;
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:N}",
- "cpupool", (int)info[i].cpupool_id,
- "sched", info[i].sched_id,
- "n_dom", info[i].n_dom,
- "cpulist", cpumap_to_cpulist(info[i].cpumap));
+ "cpupool", (int)info->cpupool_id,
+ "sched", info->sched_id,
+ "n_dom", info->n_dom,
+ "cpulist", cpumap_to_cpulist(info->cpumap,
+ info->cpumap_size));
+ first_pool = info->cpupool_id + 1;
+ free(info);
+
if ( info_dict == NULL )
{
Py_DECREF(list);
- if ( info_dict != NULL ) { Py_DECREF(info_dict); }
- free(info);
return NULL;
}
- PyList_SetItem(list, i, info_dict);
+
+ PyList_Append(list, info_dict);
+ Py_DECREF(info_dict);
}
-
- free(info);
return list;
}
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
{
- uint64_t cpumap;
+ uint64_t *cpumap;
+ xc_physinfo_t physinfo;
+ int ret;
+ PyObject *info = NULL;
- if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+ if (xc_physinfo(self->xc_handle, &physinfo))
return pyxc_error_to_exception(self->xc_handle);
- return cpumap_to_cpulist(cpumap);
+ cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+ if (!cpumap) {
+ errno = -ENOMEM;
+ return PyErr_SetFromErrno(xc_error_obj);
+ }
+
+ ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+ (physinfo.max_cpu_id + 8) / 8);
+ if (!ret)
+ info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+ free(cpumap);
+
+ return ret ? pyxc_error_to_exception(self->xc_handle) : info;
}
static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
* Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 5:51 [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
@ 2010-09-17 9:00 ` Ian Campbell
2010-09-17 9:20 ` Juergen Gross
0 siblings, 1 reply; 10+ messages in thread
From: Ian Campbell @ 2010-09-17 9:00 UTC (permalink / raw)
To: Juergen Gross; +Cc: xen-devel
On Fri, 2010-09-17 at 06:51 +0100, Juergen Gross wrote:
> Signed-off-by: juergen.gross@ts.fujitsu.com
>
> To be able to support arbitrary numbers of physical cpus it was
> necessary to
> include the size of cpumaps in the xc-interfaces for cpu pools.
> These were:
> definition of xc_cpupoolinfo_t
> xc_cpupool_getinfo()
> xc_cpupool_freeinfo()
>
> diff -r d978675f3d53 tools/libxc/xc_cpupool.c
> --- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
> +++ b/tools/libxc/xc_cpupool.c Fri Sep 17 07:42:30 2010 +0200
> @@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
> return ret;
> }
>
> +static int get_cpumap_size(xc_interface *xch)
> +{
> + static int cpumap_size = 0;
> + xc_physinfo_t physinfo;
> +
> + if ( cpumap_size )
> + return cpumap_size;
> +
> + if ( !xc_physinfo(xch, &physinfo) )
> + cpumap_size = (physinfo.max_cpu_id + 8) / 8;
The + 8 / 8 thing still jumps out at me every time I look at it. Perhaps
the intention would be clearer if written as:
int nr_cpus = physinfo.max_cpu_id + 1;
cpumap_size = (nr_cpus + 7) / 8;
??
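For what it's worth, the two forms are numerically identical, since
max_cpu_id + 8 == (max_cpu_id + 1) + 7; the rewrite only makes the
"round bits up to bytes" intent visible. Illustrative values only:

    /* both roundings give the byte count needed for nr_cpus bits */
    int bytes_a = (max_cpu_id + 8) / 8;        /* current form   */
    int bytes_b = ((max_cpu_id + 1) + 7) / 8;  /* suggested form */
    /* e.g. max_cpu_id = 63 -> 8 bytes, max_cpu_id = 64 -> 9 bytes */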
If xc_physinfo fails (which seems like a reasonably fatal type of error)
then this function returns 0 but the caller does not seem to check for
this case.
> +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
> + uint32_t poolid)
> {
> [...]
> + local_size = get_cpumap_size(xch);
> + cpumap_size = (local_size + 7) / 8;
local_size has already been rounded up in get_cpumap_size. Do we really
need to do it again?
> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
Why do we need both "cpumap_size * 8" and local_size additional bytes
here? Both contain the number of bytes necessary to contain a cpumap
bitmask and in fact I suspect they are both equal at this point (see
point about rounding above).
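(Concretely, on a hypothetical 64-cpu machine: local_size = (63 + 8) / 8 = 8
bytes and cpumap_size = (8 + 7) / 8 = 1 uint64, so the allocation appends
cpumap_size * 8 = 8 bytes for the caller-visible map plus another
local_size = 8 bytes of hypercall scratch -- two equal-sized tails.)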
Ian.
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 9:00 ` Ian Campbell
@ 2010-09-17 9:20 ` Juergen Gross
2010-09-17 9:44 ` Ian Campbell
2010-09-17 15:51 ` Ian Jackson
0 siblings, 2 replies; 10+ messages in thread
From: Juergen Gross @ 2010-09-17 9:20 UTC (permalink / raw)
To: Ian Campbell; +Cc: xen-devel
[-- Attachment #1: Type: text/plain, Size: 2723 bytes --]
On 09/17/10 11:00, Ian Campbell wrote:
> On Fri, 2010-09-17 at 06:51 +0100, Juergen Gross wrote:
>> Signed-off-by: juergen.gross@ts.fujitsu.com
>>
>> To be able to support arbitrary numbers of physical cpus it was
>> necessary to
>> include the size of cpumaps in the xc-interfaces for cpu pools.
>> These were:
>> definition of xc_cpupoolinfo_t
>> xc_cpupool_getinfo()
>> xc_cpupool_freeinfo()
>>
>> diff -r d978675f3d53 tools/libxc/xc_cpupool.c
>> --- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
>> +++ b/tools/libxc/xc_cpupool.c Fri Sep 17 07:42:30 2010 +0200
>> @@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
>> return ret;
>> }
>>
>> +static int get_cpumap_size(xc_interface *xch)
>> +{
>> + static int cpumap_size = 0;
>> + xc_physinfo_t physinfo;
>> +
>> + if ( cpumap_size )
>> + return cpumap_size;
>> +
>> + if ( !xc_physinfo(xch, &physinfo) )
>> + cpumap_size = (physinfo.max_cpu_id + 8) / 8;
>
> The + 8 / 8 thing still jumps out at me every time I look at it. Perhaps
> the intention would be clearer if written as:
> int nr_cpus = physinfo.max_cpu_id + 1;
> cpumap_size = (nr_cpus + 7) / 8;
I modified it to make it more clear.
> If xc_physinfo fails (which seems like a reasonably fatal type of error)
> then this function returns 0 but the caller does not seem to check for
> this case.
Okay, test added.
>
>> +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
>> + uint32_t poolid)
>> {
>> [...]
>> + local_size = get_cpumap_size(xch);
>> + cpumap_size = (local_size + 7) / 8;
>
> local_size has already been rounded up in get_cpumap_size. Do we really
> need to do it again?
I've made it more clear that this is rounding to uint64.
>
>> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
>
> Why do we need both "cpumap_size * 8" and local_size additional bytes
> here? Both contain the number of bytes necessary to contain a cpumap
> bitmask and in fact I suspect they are both equal at this point (see
> point about rounding above).
The hypervisor returns a cpumask based on bytes, the tools use uint64-based
cpumasks. In practice this is equivalent as long as a multiple of 8 bytes is
written by the hypervisor and the system is running little-endian.
I prefer a clean interface mapping here.
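For reference, the conversion amounts to something like the following
sketch (illustrative, not the exact xc_private.h code):

    /* Sketch: copy a byte-indexed bitmap into a uint64-indexed one.
     * Assumes lp[] is zero-initialised; bit i means cpu i on both
     * sides, so the result does not depend on host endianness. */
    static void bitmap_byte_to_64_sketch(uint64_t *lp, const uint8_t *bp,
                                         int nbits)
    {
        int i;

        for ( i = 0; i < nbits; i++ )
            if ( bp[i / 8] & (1 << (i % 8)) )
                lp[i / 64] |= (uint64_t)1 << (i % 64);
    }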
Juergen
--
Juergen Gross Principal Developer Operating Systems
TSP ES&S SWE OS6 Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28 Internet: ts.fujitsu.com
D-80807 Muenchen Company details: ts.fujitsu.com/imprint.html
[-- Attachment #2: cpupool-tools-cpumask.patch --]
[-- Type: text/x-patch, Size: 11757 bytes --]
Signed-off-by: juergen.gross@ts.fujitsu.com
To be able to support arbitrary numbers of physical cpus it was necessary to
include the size of cpumaps in the xc-interfaces for cpu pools.
These were:
definition of xc_cpupoolinfo_t
xc_cpupool_getinfo()
xc_cpupool_freeinfo()
diff -r d978675f3d53 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xc_cpupool.c Fri Sep 17 11:14:16 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
return ret;
}
+static int get_cpumap_size(xc_interface *xch)
+{
+ static int cpumap_size = 0;
+ xc_physinfo_t physinfo;
+
+ if ( cpumap_size )
+ return cpumap_size;
+
+ if ( !xc_physinfo(xch, &physinfo) )
+ cpumap_size = ((physinfo.max_cpu_id + 1) + 7) / 8; /* round to bytes */
+
+ return cpumap_size;
+}
+
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id)
@@ -64,50 +78,61 @@ int xc_cpupool_destroy(xc_interface *xch
return do_sysctl_save(xch, &sysctl);
}
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid)
{
int err = 0;
- int p;
- uint32_t poolid = first_poolid;
- uint8_t local[sizeof (info->cpumap)];
+ xc_cpupoolinfo_t *info;
+ uint8_t *local;
+ int local_size;
+ int cpumap_size;
+ int size;
DECLARE_SYSCTL;
- memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+ local_size = get_cpumap_size(xch);
+ if (!local_size)
+ {
+ PERROR("Could not get number of cpus");
+ return NULL;
+ }
+ cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
+ size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap) + local_size;
+ info = malloc(size);
+ if ( !info )
+ return NULL;
- for (p = 0; p < n_max; p++)
+ memset(info, 0, size);
+ info->cpumap_size = local_size * 8;
+ info->cpumap = (uint64_t *)(info + 1);
+ local = (uint8_t *)(info->cpumap + cpumap_size);
+
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+ if ( (err = lock_pages(local, local_size)) != 0 )
{
- sysctl.cmd = XEN_SYSCTL_cpupool_op;
- sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
- sysctl.u.cpupool_op.cpupool_id = poolid;
- set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+ PERROR("Could not lock memory for Xen hypercall");
+ free(info);
+ return NULL;
+ }
+ err = do_sysctl_save(xch, &sysctl);
+ unlock_pages(local, local_size);
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- break;
- }
- err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
-
- if ( err < 0 )
- break;
-
- info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
- info->sched_id = sysctl.u.cpupool_op.sched_id;
- info->n_dom = sysctl.u.cpupool_op.n_dom;
- bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
- poolid = sysctl.u.cpupool_op.cpupool_id + 1;
- info++;
+ if ( err < 0 )
+ {
+ free(info);
+ return NULL;
}
- if ( p == 0 )
- return err;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(info->cpumap, local, local_size * 8);
- return p;
+ return info;
}
int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +175,37 @@ int xc_cpupool_movedomain(xc_interface *
}
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap)
+ uint64_t *cpumap,
+ int cpusize)
{
int err;
- uint8_t local[sizeof (*cpumap)];
+ uint8_t *local;
DECLARE_SYSCTL;
+
+ local = malloc(cpusize);
+ if (local == NULL)
+ return -ENOMEM;
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(local, cpusize)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
+ free(local);
return err;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(local, cpusize);
+ bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+ free(local);
if (err < 0)
return err;
- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
return 0;
}
diff -r d978675f3d53 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xenctrl.h Fri Sep 17 11:14:16 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
uint32_t cpupool_id;
uint32_t sched_id;
uint32_t n_dom;
- uint64_t cpumap;
+ uint32_t cpumap_size; /* max number of cpus in map */
+ uint64_t *cpumap;
} xc_cpupoolinfo_t;
/**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
* Get cpupool info. Returns info for up to the specified number of cpupools
* starting at the given id.
* @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
*/
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid);
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
* return 0 on success, -1 on failure
*/
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap);
+ uint64_t *cpumap,
+ int cpusize);
/*
diff -r d978675f3d53 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Sep 17 11:14:16 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
if ( xc_physinfo(self->xc_handle, &info) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = info.nr_cpus;
+ nr_cpus = info.max_cpu_id + 1;
size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = pinfo.nr_cpus;
+ nr_cpus = pinfo.max_cpu_id + 1;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
{
PyObject *cpulist = NULL;
- uint32_t i;
+ int i;
cpulist = PyList_New(0);
- for ( i = 0; cpumap != 0; i++ )
+ for ( i = 0; i < cpusize; i++ )
{
- if ( cpumap & 1 )
+ if ( *cpumap & (1L << (i % 64)) )
{
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap >>= 1;
+ if ( (i % 64) == 63 )
+ cpumap++;
}
return cpulist;
}
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
PyObject *list, *info_dict;
uint32_t first_pool = 0;
- int max_pools = 1024, nr_pools, i;
+ int max_pools = 1024, i;
xc_cpupoolinfo_t *info;
static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
&first_pool, &max_pools) )
return NULL;
- info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
- if (info == NULL)
- return PyErr_NoMemory();
-
- nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
- if (nr_pools < 0)
+ list = PyList_New(0);
+ for (i = 0; i < max_pools; i++)
{
- free(info);
- return pyxc_error_to_exception(self->xc_handle);
- }
-
- list = PyList_New(nr_pools);
- for ( i = 0 ; i < nr_pools; i++ )
- {
+ info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+ if (info == NULL)
+ break;
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:N}",
- "cpupool", (int)info[i].cpupool_id,
- "sched", info[i].sched_id,
- "n_dom", info[i].n_dom,
- "cpulist", cpumap_to_cpulist(info[i].cpumap));
+ "cpupool", (int)info->cpupool_id,
+ "sched", info->sched_id,
+ "n_dom", info->n_dom,
+ "cpulist", cpumap_to_cpulist(info->cpumap,
+ info->cpumap_size));
+ first_pool = info->cpupool_id + 1;
+ free(info);
+
if ( info_dict == NULL )
{
Py_DECREF(list);
- if ( info_dict != NULL ) { Py_DECREF(info_dict); }
- free(info);
return NULL;
}
- PyList_SetItem(list, i, info_dict);
+
+ PyList_Append(list, info_dict);
+ Py_DECREF(info_dict);
}
-
- free(info);
return list;
}
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
{
- uint64_t cpumap;
+ uint64_t *cpumap;
+ xc_physinfo_t physinfo;
+ int ret;
+ PyObject *info = NULL;
- if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+ if (xc_physinfo(self->xc_handle, &physinfo))
return pyxc_error_to_exception(self->xc_handle);
- return cpumap_to_cpulist(cpumap);
+ cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+ if (!cpumap) {
+ errno = -ENOMEM;
+ return PyErr_SetFromErrno(xc_error_obj);
+ }
+
+ ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+ (physinfo.max_cpu_id + 8) / 8);
+ if (!ret)
+ info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+ free(cpumap);
+
+ return ret ? pyxc_error_to_exception(self->xc_handle) : info;
}
static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 9:20 ` Juergen Gross
@ 2010-09-17 9:44 ` Ian Campbell
2010-09-17 10:04 ` Juergen Gross
2010-09-17 10:08 ` Re: [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
2010-09-17 15:51 ` Ian Jackson
1 sibling, 2 replies; 10+ messages in thread
From: Ian Campbell @ 2010-09-17 9:44 UTC (permalink / raw)
To: Juergen Gross; +Cc: xen-devel
On Fri, 2010-09-17 at 10:20 +0100, Juergen Gross wrote:
> On 09/17/10 11:00, Ian Campbell wrote:
> > On Fri, 2010-09-17 at 06:51 +0100, Juergen Gross wrote:
> >> Signed-off-by: juergen.gross@ts.fujitsu.com
> >>
> >> To be able to support arbitrary numbers of physical cpus it was
> >> necessary to
> >> include the size of cpumaps in the xc-interfaces for cpu pools.
> >> These were:
> >> definition of xc_cpupoolinfo_t
> >> xc_cpupool_getinfo()
> >> xc_cpupool_freeinfo()
> >>
> >> diff -r d978675f3d53 tools/libxc/xc_cpupool.c
> >> --- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
> >> +++ b/tools/libxc/xc_cpupool.c Fri Sep 17 07:42:30 2010 +0200
> >> @@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
> >> return ret;
> >> }
> >>
> >> +static int get_cpumap_size(xc_interface *xch)
> >> +{
> >> + static int cpumap_size = 0;
> >> + xc_physinfo_t physinfo;
> >> +
> >> + if ( cpumap_size )
> >> + return cpumap_size;
> >> +
> >> + if ( !xc_physinfo(xch, &physinfo) )
> >> + cpumap_size = (physinfo.max_cpu_id + 8) / 8;
> >
> > The + 8 / 8 thing still jumps out at me every time I look at it. Perhaps
> > the intention would be clearer if written as:
> > int nr_cpus = physinfo.max_cpu_id + 1;
> > cpumap_size = (nr_cpus + 7) / 8;
>
> I modified it to make it more clear.
Thanks.
>
> > If xc_physinfo fails (which seems like a reasonably fatal type of error)
> > then this function returns 0 but the caller does not seem to check for
> > this case.
>
> Okay, test added.
>
> >
> >> +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
> >> + uint32_t poolid)
> >> {
> >> [...]
> >> + local_size = get_cpumap_size(xch);
> >> + cpumap_size = (local_size + 7) / 8;
> >
> > local_size has already been rounded up in get_cpumap_size. Do we really
> > need to do it again?
>
> I've made it more clear that this is rounding to uint64.
Wouldn't that be "(.. + 63) / 8" then?
>
> >
> >> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
> >
> > Why do we need both "cpumap_size * 8" and local_size additional bytes
> > here? Both contain the number of bytes necessary to contain a cpumap
> > bitmask and in fact I suspect they are both equal at this point (see
> > point about rounding above).
>
> The hypervisor returns a cpumask based on bytes, the tools use uint64-based
> cpumasks.
Oh, I see, as well as xc_cpupool_info_t and the cpumap which it contains
being allocated together in a single buffer you are also including a
third buffer which is for local use in this function only but which is
included in the memory region returned to the caller (who doesn't know
about it). Seems a bit odd to me, why not just allocate it locally then
free it (or use alloca)?
Actually, when I complete my hypercall buffer patch this memory will
need to be separately allocated any way since it needs to come from a
special pool. I'd prefer it if you just used explicit separate
allocation for this buffer from the start.
> In practice this is equivalent as long as a multiple of 8 bytes is
> written by the hypervisor and the system is running little-endian.
> I prefer a clean interface mapping here.
Using a single uint64 when there was a limit of 64 cpus made sense but
now that it is variable length why not just use bytes everywhere? It
would avoid a lot of confusion about what various size units are at
various points etc. You would avoid needing to translate between the
hypervisor and tools representations too, wouldn't you?
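With bytes everywhere the per-cpu test would reduce to something like
this hypothetical helper (just to illustrate, not proposed code):

    /* hypothetical: byte-indexed cpumap test, no uint64 conversion */
    static inline int cpumap_test_cpu(const uint8_t *cpumap, int cpu)
    {
        return !!(cpumap[cpu / 8] & (1 << (cpu % 8)));
    }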
Ian.
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 9:44 ` Ian Campbell
@ 2010-09-17 10:04 ` Juergen Gross
2010-09-17 10:10 ` Ian Campbell
2010-09-17 10:08 ` Re: [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
1 sibling, 1 reply; 10+ messages in thread
From: Juergen Gross @ 2010-09-17 10:04 UTC (permalink / raw)
To: Ian Campbell; +Cc: xen-devel
On 09/17/10 11:44, Ian Campbell wrote:
> On Fri, 2010-09-17 at 10:20 +0100, Juergen Gross wrote:
>> On 09/17/10 11:00, Ian Campbell wrote:
>>> local_size has already been rounded up in get_cpumap_size. Do we really
>>> need to do it again?
>>
>> I've made it more clear that this is rounding to uint64.
>
> Wouldn't that be "(.. + 63) / 8" then?
No, local_size is already bytes...
>>
>>>
>>>> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
>>>
>>> Why do we need both "cpumap_size * 8" and local_size additional bytes
>>> here? Both contain the number of bytes necessary to contain a cpumap
>>> bitmask and in fact I suspect they are both equal at this point (see
>>> point about rounding above).
>>
>> The hypervisor returns a cpumask based on bytes, the tools use uint64-based
>> cpumasks.
>
> Oh, I see, as well as xc_cpupool_info_t and the cpumap which it contains
> being allocated together in a single buffer you are also including a
> third buffer which is for local use in this function only but which is
> included in the memory region returned to the caller (who doesn't know
> about it). Seems a bit odd to me, why not just allocate it locally then
> free it (or use alloca)?
>
> Actually, when I complete my hypercall buffer patch this memory will
> need to be separately allocated any way since it needs to come from a
> special pool. I'd prefer it if you just used explicit separate
> allocation for this buffer from the start.
Okay.
>
>> In practice this is equivalent as long as a multiple of 8 bytes is
>> written by the hypervisor and the system is running little-endian.
>> I prefer a clean interface mapping here.
>
> Using a single uint64 when there was a limit of 64 cpus made sense but
> now that it is variable length why not just use bytes everywhere? It
> would avoid a lot of confusion about what various size units are at
> various points etc. You would avoid needing to translate between the
> hypervisor and tools representations too, wouldn't you?
This would suggest changing xc_vcpu_setaffinity() and
--
Juergen Gross Principal Developer Operating Systems
TSP ES&S SWE OS6 Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28 Internet: ts.fujitsu.com
D-80807 Muenchen Company details: ts.fujitsu.com/imprint.html
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 9:44 ` Ian Campbell
2010-09-17 10:04 ` Juergen Gross
@ 2010-09-17 10:08 ` Juergen Gross
2010-09-17 13:40 ` Ian Campbell
1 sibling, 1 reply; 10+ messages in thread
From: Juergen Gross @ 2010-09-17 10:08 UTC (permalink / raw)
To: Ian Campbell; +Cc: xen-devel
[-- Attachment #1: Type: text/plain, Size: 2520 bytes --]
Please ignore the previous mail, I hit the send button too early...
On 09/17/10 11:44, Ian Campbell wrote:
> On Fri, 2010-09-17 at 10:20 +0100, Juergen Gross wrote:
>> On 09/17/10 11:00, Ian Campbell wrote:
>>> local_size has already been rounded up in get_cpumap_size. Do we really
>>> need to do it again?
>>
>> I've made it more clear that this is rounding to uint64.
>
> Wouldn't that be "(.. + 63) / 8" then?
No, local_size is already bytes...
>>
>>>
>>>> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
>>>
>>> Why do we need both "cpumap_size * 8" and local_size additional bytes
>>> here? Both contain the number of bytes necessary to contain a cpumap
>>> bitmask and in fact I suspect they are both equal at this point (see
>>> point about rounding above).
>>
>> The hypervisor returns a cpumask based on bytes, the tools use uint64-based
>> cpumasks.
>
> Oh, I see, as well as xc_cpupool_info_t and the cpumap which it contains
> being allocated together in a single buffer you are also including a
> third buffer which is for local use in this function only but which is
> included in the memory region returned to the caller (who doesn't know
> about it). Seems a bit odd to me, why not just allocate it locally then
> free it (or use alloca)?
>
> Actually, when I complete my hypercall buffer patch this memory will
> need to be separately allocated any way since it needs to come from a
> special pool. I'd prefer it if you just used explicit separate
> allocation for this buffer from the start.
Okay.
>
>> In practice this is equivalent as long as a multiple of 8 bytes is
>> written by the hypervisor and the system is running little-endian.
>> I prefer a clean interface mapping here.
>
> Using a single uint64 when there was a limit of 64 cpus made sense but
> now that it is variable length why not just use bytes everywhere? It
> would avoid a lot of confusion about what various size units are at
> various points etc. You would avoid needing to translate between the
> hypervisor and tools representations too, wouldn't you?
This would suggest changing xc_vcpu_setaffinity() and xc_vcpu_getaffinity(),
too.
--
Juergen Gross Principal Developer Operating Systems
TSP ES&S SWE OS6 Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28 Internet: ts.fujitsu.com
D-80807 Muenchen Company details: ts.fujitsu.com/imprint.html
[-- Attachment #2: cpupool-tools-cpumask.patch --]
[-- Type: text/x-patch, Size: 11723 bytes --]
Signed-off-by: juergen.gross@ts.fujitsu.com
To be able to support arbitrary numbers of physical cpus it was necessary to
include the size of cpumaps in the xc-interfaces for cpu pools.
These were:
definition of xc_cpupoolinfo_t
xc_cpupool_getinfo()
xc_cpupool_freeinfo()
diff -r d978675f3d53 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xc_cpupool.c Fri Sep 17 12:07:24 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
return ret;
}
+static int get_cpumap_size(xc_interface *xch)
+{
+ static int cpumap_size = 0;
+ xc_physinfo_t physinfo;
+
+ if ( cpumap_size )
+ return cpumap_size;
+
+ if ( !xc_physinfo(xch, &physinfo) )
+ cpumap_size = ((physinfo.max_cpu_id + 1) + 7) / 8; /* round to bytes */
+
+ return cpumap_size;
+}
+
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id)
@@ -64,50 +78,61 @@ int xc_cpupool_destroy(xc_interface *xch
return do_sysctl_save(xch, &sysctl);
}
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid)
{
int err = 0;
- int p;
- uint32_t poolid = first_poolid;
- uint8_t local[sizeof (info->cpumap)];
+ xc_cpupoolinfo_t *info;
+ uint8_t *local;
+ int local_size;
+ int cpumap_size;
+ int size;
DECLARE_SYSCTL;
- memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+ local_size = get_cpumap_size(xch);
+ local = alloca(local_size);
+ if (!local_size)
+ {
+ PERROR("Could not get number of cpus");
+ return NULL;
+ }
+ cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
+ size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
+ info = malloc(size);
+ if ( !info )
+ return NULL;
- for (p = 0; p < n_max; p++)
+ memset(info, 0, size);
+ info->cpumap_size = local_size * 8;
+ info->cpumap = (uint64_t *)(info + 1);
+
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+ if ( (err = lock_pages(local, local_size)) != 0 )
{
- sysctl.cmd = XEN_SYSCTL_cpupool_op;
- sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
- sysctl.u.cpupool_op.cpupool_id = poolid;
- set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+ PERROR("Could not lock memory for Xen hypercall");
+ free(info);
+ return NULL;
+ }
+ err = do_sysctl_save(xch, &sysctl);
+ unlock_pages(local, local_size);
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- break;
- }
- err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
-
- if ( err < 0 )
- break;
-
- info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
- info->sched_id = sysctl.u.cpupool_op.sched_id;
- info->n_dom = sysctl.u.cpupool_op.n_dom;
- bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
- poolid = sysctl.u.cpupool_op.cpupool_id + 1;
- info++;
+ if ( err < 0 )
+ {
+ free(info);
+ return NULL;
}
- if ( p == 0 )
- return err;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(info->cpumap, local, local_size * 8);
- return p;
+ return info;
}
int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +175,37 @@ int xc_cpupool_movedomain(xc_interface *
}
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap)
+ uint64_t *cpumap,
+ int cpusize)
{
int err;
- uint8_t local[sizeof (*cpumap)];
+ uint8_t *local;
DECLARE_SYSCTL;
+
+ local = malloc(cpusize);
+ if (local == NULL)
+ return -ENOMEM;
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(local, cpusize)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
+ free(local);
return err;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(local, cpusize);
+ bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+ free(local);
if (err < 0)
return err;
- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
return 0;
}
diff -r d978675f3d53 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xenctrl.h Fri Sep 17 12:07:24 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
uint32_t cpupool_id;
uint32_t sched_id;
uint32_t n_dom;
- uint64_t cpumap;
+ uint32_t cpumap_size; /* max number of cpus in map */
+ uint64_t *cpumap;
} xc_cpupoolinfo_t;
/**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
* Get cpupool info. Returns info for up to the specified number of cpupools
* starting at the given id.
* @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
*/
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid);
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
* return 0 on success, -1 on failure
*/
int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap);
+ uint64_t *cpumap,
+ int cpusize);
/*
diff -r d978675f3d53 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Sep 17 12:07:24 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
if ( xc_physinfo(self->xc_handle, &info) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = info.nr_cpus;
+ nr_cpus = info.max_cpu_id + 1;
size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = pinfo.nr_cpus;
+ nr_cpus = pinfo.max_cpu_id + 1;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
{
PyObject *cpulist = NULL;
- uint32_t i;
+ int i;
cpulist = PyList_New(0);
- for ( i = 0; cpumap != 0; i++ )
+ for ( i = 0; i < cpusize; i++ )
{
- if ( cpumap & 1 )
+ if ( *cpumap & (1L << (i % 64)) )
{
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap >>= 1;
+ if ( (i % 64) == 63 )
+ cpumap++;
}
return cpulist;
}
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
PyObject *list, *info_dict;
uint32_t first_pool = 0;
- int max_pools = 1024, nr_pools, i;
+ int max_pools = 1024, i;
xc_cpupoolinfo_t *info;
static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
&first_pool, &max_pools) )
return NULL;
- info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
- if (info == NULL)
- return PyErr_NoMemory();
-
- nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
- if (nr_pools < 0)
+ list = PyList_New(0);
+ for (i = 0; i < max_pools; i++)
{
- free(info);
- return pyxc_error_to_exception(self->xc_handle);
- }
-
- list = PyList_New(nr_pools);
- for ( i = 0 ; i < nr_pools; i++ )
- {
+ info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+ if (info == NULL)
+ break;
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:N}",
- "cpupool", (int)info[i].cpupool_id,
- "sched", info[i].sched_id,
- "n_dom", info[i].n_dom,
- "cpulist", cpumap_to_cpulist(info[i].cpumap));
+ "cpupool", (int)info->cpupool_id,
+ "sched", info->sched_id,
+ "n_dom", info->n_dom,
+ "cpulist", cpumap_to_cpulist(info->cpumap,
+ info->cpumap_size));
+ first_pool = info->cpupool_id + 1;
+ free(info);
+
if ( info_dict == NULL )
{
Py_DECREF(list);
- if ( info_dict != NULL ) { Py_DECREF(info_dict); }
- free(info);
return NULL;
}
- PyList_SetItem(list, i, info_dict);
+
+ PyList_Append(list, info_dict);
+ Py_DECREF(info_dict);
}
-
- free(info);
return list;
}
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
{
- uint64_t cpumap;
+ uint64_t *cpumap;
+ xc_physinfo_t physinfo;
+ int ret;
+ PyObject *info = NULL;
- if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+ if (xc_physinfo(self->xc_handle, &physinfo))
return pyxc_error_to_exception(self->xc_handle);
- return cpumap_to_cpulist(cpumap);
+ cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+ if (!cpumap) {
+ errno = -ENOMEM;
+ return PyErr_SetFromErrno(xc_error_obj);
+ }
+
+ ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+ (physinfo.max_cpu_id + 8) / 8);
+ if (!ret)
+ info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+ free(cpumap);
+
+ return ret ? pyxc_error_to_exception(self->xc_handle) : info;
}
static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 10:04 ` Juergen Gross
@ 2010-09-17 10:10 ` Ian Campbell
[not found] ` <4C934619.1000807@ts.fujitsu.com>
0 siblings, 1 reply; 10+ messages in thread
From: Ian Campbell @ 2010-09-17 10:10 UTC (permalink / raw)
To: Juergen Gross; +Cc: xen-devel
On Fri, 2010-09-17 at 11:04 +0100, Juergen Gross wrote:
> On 09/17/10 11:44, Ian Campbell wrote:
> > On Fri, 2010-09-17 at 10:20 +0100, Juergen Gross wrote:
> >> On 09/17/10 11:00, Ian Campbell wrote:
> >>> local_size has already been rounded up in get_cpumap_size. Do we really
> >>> need to do it again?
> >>
> >> I've made it more clear that this is rounding to uint64.
> >
> > Wouldn't that be "(.. + 63) / 8" then?
>
> No, local_size is already bytes...
Oh yeah.
I think that helps make my point about the confusion inherent in using
variously bits, bytes and uint64s as you move up the callstack ;-)
> This would suggest changing xc_vcpu_setaffinity() and
Not sure if others will agree but personally I'd be fine with that.
Ian.
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 10:08 ` Re: [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
@ 2010-09-17 13:40 ` Ian Campbell
0 siblings, 0 replies; 10+ messages in thread
From: Ian Campbell @ 2010-09-17 13:40 UTC (permalink / raw)
To: Juergen Gross; +Cc: xen-devel
On Fri, 2010-09-17 at 11:08 +0100, Juergen Gross wrote:
> Please ignore previous mail, I hit the send-button too early...
>
> On 09/17/10 11:44, Ian Campbell wrote:
> > On Fri, 2010-09-17 at 10:20 +0100, Juergen Gross wrote:
> >> On 09/17/10 11:00, Ian Campbell wrote:
> >>> local_size has already been rounded up in get_cpumap_size. Do we really
> >>> need to do it again?
> >>
> >> I've made it more clear that this is rounding to uint64.
> >
> > Wouldn't that be "(.. + 63) / 8" then?
>
> No, local_size is already bytes...
>
> >>
> >>>
> >>>> + size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
> >>>
> >>> Why do we need both "cpumap_size * 8" and local_size additional bytes
> >>> here? Both contain the number of bytes necessary to contain a cpumap
> >>> bitmask and in fact I suspect they are both equal at this point (see
> >>> point about rounding above).
> >>
> >> The hypervisor returns a cpumask based on bytes, the tools use uint64-based
> >> cpumasks.
> >
> > Oh, I see, as well as xc_cpupool_info_t and the cpumap which it contains
> > being allocated together in a single buffer you are also including a
> > third buffer which is for local use in this function only but which is
> > included in the memory region returned to the caller (who doesn't know
> > about it). Seems a bit odd to me, why not just allocate it locally then
> > free it (or use alloca)?
> >
> > Actually, when I complete my hypercall buffer patch this memory will
> > need to be separately allocated any way since it needs to come from a
> > special pool. I'd prefer it if you just used explicit separate
> > allocation for this buffer from the start.
>
> Okay.
>
> >
> >> In practice this is equivalent as long as a multiple of 8 bytes is
> >> written by the hypervisor and the system is running little-endian.
> >> I prefer a clean interface mapping here.
> >
> > Using a single uint64 when there was a limit of 64 cpus made sense but
> > now that it is variable length why not just use bytes everywhere? It
> > would avoid a lot of confusion about what various size units are at
> > various points etc. You would avoid needing to translate between the
> > hypervisor and tools representations too, wouldn't you?
>
> This would suggest changing xc_vcpu_setaffinity() and xc_vcpu_getaffinity(),
> too.
Juergen told me privately that he will do this after this patchset,
which seems reasonable given the number of iterations we've been through
so far. So:
Acked-by: Ian Campbell <ian.campbell@citrix.com>
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python
2010-09-17 9:20 ` Juergen Gross
2010-09-17 9:44 ` Ian Campbell
@ 2010-09-17 15:51 ` Ian Jackson
1 sibling, 0 replies; 10+ messages in thread
From: Ian Jackson @ 2010-09-17 15:51 UTC (permalink / raw)
To: Juergen Gross; +Cc: Ian Campbell, xen-devel
Juergen Gross writes ("Re: [Xen-devel] Re: [Patch] update cpumask handling for cpu pools in libxc and python"):
> The hypervisor returns a cpumask based on bytes, the tools use uint64-based
> cpumasks. In practice this is equivalent as long as a multiple of 8 bytes is
> written by the hypervisor and the system is running little-endian.
Surely we don't intend to tie our code permanently to little-endian
cpus ? I don't know whether we have supported any big-endian
architectures in the past but I don't think we should rule it out
where it's not necessary to do so.
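To state the concern concretely, a hypothetical fragment (not code from
the patch) showing where reinterpreting the hypervisor's byte array in
place would break on big-endian hosts:

    #include <stdint.h>
    #include <string.h>

    /* hypothetical: returns 1 on little-endian hosts, 0 on big-endian */
    static int cpu0_looks_set(void)
    {
        uint8_t bytes[8] = { 0x01 };  /* cpu 0 set: byte 0, bit 0 */
        uint64_t map;

        memcpy(&map, bytes, sizeof(map));
        return (map & 1) != 0;  /* a bit-indexed copy such as
                                 * bitmap_byte_to_64() avoids this trap */
    }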
Ian.
* Re: Re: [Patch] update cpumask handling for cpu pools in libxc and python [and 1 more messages]
[not found] ` <4C983A51.5000105@ts.fujitsu.com>
@ 2010-09-21 15:04 ` Ian Jackson
0 siblings, 0 replies; 10+ messages in thread
From: Ian Jackson @ 2010-09-21 15:04 UTC (permalink / raw)
To: Juergen Gross; +Cc: Ian Campbell, xen-devel
Juergen Gross writes ("Re: [Xen-devel] Re: [Patch] update cpumask handling for cpu pools in libxc and python"):
> On 09/20/10 18:11, Ian Jackson wrote:
> > The cpumask one still seems to assume little-endian, doesn't it ?
>
> No, I don't think so.
> The hypervisor is using byte arrays for cpumasks in its interface to the
> tools. There bitmap_byte_to_64() is being used to convert it to uint64_t
> arrays.
Ah, OK.
> The main reason to change cpumask representation in the tools to a byte array
> (which I will do soon in another patch) is to avoid extra allocation of a
> buffer for the interface to the hypervisor.
OK.
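A hedged sketch of what that byte-array variant might look like
(hypothetical, not the actual follow-up patch): the buffer handed to
the hypercall doubles as the caller-visible map, so no bounce buffer
and no byte-to-uint64 conversion is needed.

    uint8_t *cpumap = calloc(1, cpumap_bytes);  /* cpumap_bytes: map size in bytes */
    if ( cpumap )
    {
        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, cpumap);
        sysctl.u.cpupool_op.cpumap.nr_cpus = cpumap_bytes * 8;
        /* ... issue the sysctl, then hand cpumap straight to the caller ... */
    }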
However, I haven't applied your patch because it breaks the libxl
build. You need to fix up all callers of xc_cpupool_getinfo
(libxl.c:622 and perhaps elsewhere).
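For anyone doing the fixup, a hypothetical sketch of the caller-side
pattern under the new interface (not the actual libxl change):

    xc_cpupoolinfo_t *info;
    uint32_t poolid = 0;

    while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
    {
        /* ... consume info->cpupool_id, info->sched_id, info->cpumap ... */
        poolid = info->cpupool_id + 1;
        free(info);
    }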
Thanks,
Ian.
Thread overview: 10+ messages
2010-09-17 5:51 [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
2010-09-17 9:00 ` Ian Campbell
2010-09-17 9:20 ` Juergen Gross
2010-09-17 9:44 ` Ian Campbell
2010-09-17 10:04 ` Juergen Gross
2010-09-17 10:10 ` Ian Campbell
[not found] ` <4C934619.1000807@ts.fujitsu.com>
[not found] ` <1284720423.16095.3257.camel@zakaz.uk.xensource.com>
[not found] ` <4C9348BA.4000705@ts.fujitsu.com>
[not found] ` <1284723441.16095.3355.camel@zakaz.uk.xensource.com>
[not found] ` <19603.39418.809534.435478@mariner.uk.xensource.com>
[not found] ` <4C96EC80.4000901@ts.fujitsu.com>
[not found] ` <19607.34717.433923.486852@mariner.uk.xensource.com>
[not found] ` <4C983A51.5000105@ts.fujitsu.com>
2010-09-21 15:04 ` Re: [Patch] update cpumask handling for cpu pools in libxc and python [and 1 more messages] Ian Jackson
2010-09-17 10:08 ` Re: [Patch] update cpumask handling for cpu pools in libxc and python Juergen Gross
2010-09-17 13:40 ` Ian Campbell
2010-09-17 15:51 ` Ian Jackson