* [Patch] adjust the cpu-affinity to more than 64 cpus
@ 2010-03-17  8:56 James (song wei)
  2010-03-17  9:25 ` Jan Beulich
  0 siblings, 1 reply; 14+ messages in thread
From: James (song wei) @ 2010-03-17  8:56 UTC (permalink / raw)
  To: xen-devel


Hi,
 There are more than 64 CPUs on new Intel platforms, especially on NUMA
systems, so we need to break the pCPU limit (currently 64) when setting the
affinity of a VCPU.
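
For context, a minimal caller-side sketch of the widened interface (pin_vcpu
is a hypothetical helper, not part of the patch; the xc_vcpu_setaffinity
signature matches the change below, with the size given in bytes):

    #include <stdlib.h>
    #include <stdint.h>
    #include "xenctrl.h"

    /* Pin one VCPU to one pCPU via the variable-width interface.
     * nr_cpus would come from xc_physinfo(), as in the patch. */
    static int pin_vcpu(int xc_handle, uint32_t domid, int vcpu,
                        int cpu, int nr_cpus)
    {
        int size = (nr_cpus + 63) / 64;            /* 64-bit words needed */
        uint64_t *cpumap = calloc(size, sizeof(*cpumap));
        int rc;

        if ( cpumap == NULL )
            return -1;
        cpumap[cpu / 64] |= (uint64_t)1 << (cpu % 64);
        rc = xc_vcpu_setaffinity(xc_handle, domid, vcpu, cpumap,
                                 size * sizeof(*cpumap));
        free(cpumap);
        return rc;
    }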

-James (Song Wei)

Signed-off-by: James (song wei) <jsong@novell.com> 
diff -r c1f272c3a441 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/libxc/xc_domain.c	Wed Mar 17 16:51:07 2010 +0800
@@ -98,23 +98,28 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap)
+                        uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t *local = malloc(cpusize); 
 
+    if(local == NULL)
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if ( lock_pages(local, cpusize) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -122,9 +127,10 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof(local));
+    unlock_pages(local, cpusize);
 
  out:
+    free(local);
     return ret;
 }
 
@@ -132,18 +138,26 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t * local = malloc(cpusize);
+
+    if(local == NULL)
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
+
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
     if ( lock_pages(local, sizeof(local)) != 0 )
     {
@@ -154,8 +168,9 @@
     ret = do_domctl(xc_handle, &domctl);
 
     unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
- out:
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+out:
+    free(local);
     return ret;
 }
 
diff -r c1f272c3a441 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/libxc/xenctrl.h	Wed Mar 17 16:51:07 2010 +0800
@@ -309,11 +309,13 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 /**
  * This function will return information about one or more domains. It is
diff -r c1f272c3a441 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 16:51:07 2010 +0800
@@ -215,35 +215,54 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  cpumap = ~0ULL;
+    uint64_t  *cpumap;
     PyObject *cpulist = NULL;
+    int nr_cpus, size;
+    xc_physinfo_t info; 
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
+    
 
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, 
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
+    set_xen_guest_handle(info.cpu_to_node, map);
+    info.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+        return pyxc_error_to_exception();
+  
+    nr_cpus = info.nr_cpus;
+
+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
+    cpumap = malloc(cpumap_size * size);
+    if(cpumap == NULL)
+        return pyxc_error_to_exception();
+    
+
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        cpumap = 0ULL;
+        for ( i = 0; i < size; i++)
+        {
+            cpumap[i] = 0ULL;
+        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            if ( cpu >= 64 )
-            {
-                errno = EINVAL;
-                PyErr_SetFromErrno(xc_error_obj);
-                return NULL;
-            }
-            cpumap |= (uint64_t)1 << cpu;
+            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
-    
+    }
+
     Py_INCREF(zero);
+    free(cpumap); 
     return zero;
 }
 
@@ -362,7 +381,11 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    int nr_cpus, size;
+    xc_physinfo_t pinfo = { 0 };
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -370,12 +393,25 @@
                                       &dom, &vcpu) )
         return NULL;
 
+    set_xen_guest_handle(pinfo.cpu_to_node, map);
+    pinfo.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+        return pyxc_error_to_exception();
+    nr_cpus = pinfo.nr_cpus;
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
         return pyxc_error_to_exception();
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
+
+    if((cpumap = malloc(cpumap_size * size)) == NULL)
+        return pyxc_error_to_exception(); 
+
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
     if ( rc < 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
+    }
 
     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
                               "online",   info.online,
@@ -385,17 +421,18 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < size * cpumap_size * 8; i++ )
     {
-        if ( cpumap & 1 ) {
+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
             PyObject *pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        *(cpumap + i / (cpumap_size * 8)) >>= 1;
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
+    free(cpumap);
     return info_dict;
 }
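
A quick sanity check of the sizing arithmetic above (illustrative only,
assuming 8-byte bitmap words, i.e. the sizeof(*cpumap) suggested in the
review below):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t w = sizeof(uint64_t);   /* 8 bytes per bitmap word */
        /* size = (nr_cpus + w*8 - 1) / (w*8) rounds up to whole words: */
        assert((64  + w * 8 - 1) / (w * 8) == 1);   /*  64 CPUs -> 1 word  */
        assert((65  + w * 8 - 1) / (w * 8) == 2);   /*  65 CPUs -> 2 words */
        assert((128 + w * 8 - 1) / (w * 8) == 2);   /* 128 CPUs -> 2 words */
        return 0;
    }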
 

http://old.nabble.com/file/p27928229/adjust_vcpuaffinity_more_cpu.patch
adjust_vcpuaffinity_more_cpu.patch 


* Re: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-17  8:56 [Patch] adjust the cpu-affinity to more than 64 cpus James (song wei)
@ 2010-03-17  9:25 ` Jan Beulich
  2010-03-18  2:26   ` James (song wei)
  0 siblings, 1 reply; 14+ messages in thread
From: Jan Beulich @ 2010-03-17  9:25 UTC (permalink / raw)
  To: James Song; +Cc: xen-devel

>>> "James (song wei)" <jsong@novell.com> 17.03.10 09:56 >>>
>--- a/tools/python/xen/lowlevel/xc/xc.c	Mon Mar 15 17:08:29 2010 +0000
>+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 16:51:07 2010 +0800
>@@ -215,35 +215,54 @@
> {
>     uint32_t dom;
>     int vcpu = 0, i;
>-    uint64_t  cpumap = ~0ULL;
>+    uint64_t  *cpumap;
>     PyObject *cpulist = NULL;
>+    int nr_cpus, size;
>+    xc_physinfo_t info; 
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(cpumap); 

Perhaps sizeof(*cpumap)?
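
The distinction matters on 32-bit builds (a minimal illustration, assuming a
typical ILP32/LP64 toolchain):

    uint64_t *cpumap;
    /* sizeof(cpumap)  -- size of the pointer: 4 on ia32, 8 on x86-64 */
    /* sizeof(*cpumap) -- size of one bitmap word: 8 everywhere       */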

>...
>+            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));

Using [] here and in similar places further down would likely make these
constructs a little bit more legible.
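
I.e. something along the lines of (as the revised patch further down indeed
adopts):

    cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));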

>@@ -362,7 +381,11 @@
>     uint32_t dom, vcpu = 0;
>     xc_vcpuinfo_t info;
>     int rc, i;
>-    uint64_t cpumap;
>+    uint64_t *cpumap;
>+    int nr_cpus, size;
>+    xc_physinfo_t pinfo = { 0 };
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(cpumap);

Same as above.
 
>@@ -385,17 +421,18 @@
>                               "cpu",      info.cpu);
> 
>     cpulist = PyList_New(0);
>-    for ( i = 0; cpumap != 0; i++ )
>+    for ( i = 0; i < size * cpumap_size * 8; i++ )

Why not simply use nr_cpus here?
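
That is, bounding the walk by the number of physical CPUs rather than the
full bitmap width (as the revised patch below does):

    for ( i = 0; i < nr_cpus; i++ )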

Jan


* Re: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-17  9:25 ` Jan Beulich
@ 2010-03-18  2:26   ` James (song wei)
  2010-03-18  3:41     ` James (song wei)
  0 siblings, 1 reply; 14+ messages in thread
From: James (song wei) @ 2010-03-18  2:26 UTC (permalink / raw)
  To: xen-devel




Jan Beulich wrote:
> 
>>>> "James (song wei)" <jsong@novell.com> 17.03.10 09:56 >>>
>>--- a/tools/python/xen/lowlevel/xc/xc.c	Mon Mar 15 17:08:29 2010 +0000
>>+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 16:51:07 2010 +0800
>>@@ -215,35 +215,54 @@
>> {
>>     uint32_t dom;
>>     int vcpu = 0, i;
>>-    uint64_t  cpumap = ~0ULL;
>>+    uint64_t  *cpumap;
>>     PyObject *cpulist = NULL;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t info; 
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(cpumap); 
> 
> Perhaps sizeof(*cpumap)? 
> 
> -- Yeah, you are right.
> 
>>...
>>+            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
> 
> Using [] here and in similar places further down would likely make these
> constructs a little bit more legible.
> --yes. 
> 
>>@@ -362,7 +381,11 @@
>>     uint32_t dom, vcpu = 0;
>>     xc_vcpuinfo_t info;
>>     int rc, i;
>>-    uint64_t cpumap;
>>+    uint64_t *cpumap;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t pinfo = { 0 };
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(cpumap);
> 
> Same as above.
>  
>>@@ -385,17 +421,18 @@
>>                               "cpu",      info.cpu);
>> 
>>     cpulist = PyList_New(0);
>>-    for ( i = 0; cpumap != 0; i++ )
>>+    for ( i = 0; i < size * cpumap_size * 8; i++ )
> 
> Why not simply use nr_cpus here?
> --Yes, copying nr_cpus bits is enough here.
> 
> Jan, thank you very much! I'll post the new patch here soon.
> 
> -James (Song Wei)
> 
> 
> 
> 



* Re: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-18  2:26   ` James (song wei)
@ 2010-03-18  3:41     ` James (song wei)
  2010-03-19  3:14       ` Masaki Kanno
  0 siblings, 1 reply; 14+ messages in thread
From: James (song wei) @ 2010-03-18  3:41 UTC (permalink / raw)
  To: xen-devel


Keir, could you take a look at this issue?
New patch for this issue:
Signed-off-by: James (Song Wei) <jsong@novell.com>

diff -r 8b269215464b tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Wed Mar 10 14:01:32 2010 +0800
+++ b/tools/libxc/xc_domain.c	Thu Mar 18 11:37:55 2010 +0800
@@ -105,23 +105,28 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap)
+                        uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t *local = malloc(cpusize); 
 
+    if(local == NULL)
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if ( lock_pages(local, cpusize) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -129,9 +134,10 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof(local));
+    unlock_pages(local, cpusize);
 
  out:
+    free(local);
     return ret;
 }
 
@@ -139,18 +145,25 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t * local = malloc(cpusize);
+
+    if(local == NULL)
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
+
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
     if ( lock_pages(local, sizeof(local)) != 0 )
     {
@@ -161,8 +174,9 @@
     ret = do_domctl(xc_handle, &domctl);
 
     unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
- out:
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+out:
+    free(local);
     return ret;
 }
 
diff -r 8b269215464b tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Wed Mar 10 14:01:32 2010 +0800
+++ b/tools/libxc/xenctrl.h	Thu Mar 18 11:37:55 2010 +0800
@@ -310,11 +310,13 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 /**
  * This function will return information about one or more domains. It is
diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 10 14:01:32 2010 +0800
+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Mar 18 11:37:55 2010 +0800
@@ -217,8 +217,12 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  cpumap = ~0ULL;
+    uint64_t  *cpumap;
     PyObject *cpulist = NULL;
+    int nr_cpus, size;
+    xc_physinfo_t info; 
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(*cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
 
@@ -226,26 +230,38 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
+    set_xen_guest_handle(info.cpu_to_node, map);
+    info.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+        return pyxc_error_to_exception();
+  
+    nr_cpus = info.nr_cpus;
+
+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
+    cpumap = malloc(cpumap_size * size);
+    if(cpumap == NULL)
+        return pyxc_error_to_exception();
+
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        cpumap = 0ULL;
+        for ( i = 0; i < size; i++)
+        {
+            cpumap[i] = 0ULL;
+        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            if ( cpu >= 64 )
-            {
-                errno = EINVAL;
-                PyErr_SetFromErrno(xc_error_obj);
-                return NULL;
-            }
-            cpumap |= (uint64_t)1 << cpu;
+            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
-    
+    }
     Py_INCREF(zero);
+    free(cpumap); 
     return zero;
 }
 
@@ -365,7 +381,11 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    int nr_cpus, size;
+    xc_physinfo_t pinfo = { 0 };
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -373,12 +393,25 @@
                                       &dom, &vcpu) )
         return NULL;
 
+    set_xen_guest_handle(pinfo.cpu_to_node, map);
+    pinfo.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+        return pyxc_error_to_exception();
+    nr_cpus = pinfo.nr_cpus;
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
         return pyxc_error_to_exception();
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
+
+    if((cpumap = malloc(cpumap_size * size)) == NULL)
+        return pyxc_error_to_exception(); 
+
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
     if ( rc < 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
+    }
 
     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
                               "online",   info.online,
@@ -386,19 +419,19 @@
                               "running",  info.running,
                               "cpu_time", info.cpu_time,
                               "cpu",      info.cpu);
-
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < nr_cpus; i++ )
     {
-        if ( cpumap & 1 ) {
+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
             PyObject *pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        cpumap[i / (cpumap_size * 8)] >>= 1;
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
+    free(cpumap);
     return info_dict;
 }
 
  http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
adjust_vcpuaffinity_more_cpu.patch 
 




* Re: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-18  3:41     ` James (song wei)
@ 2010-03-19  3:14       ` Masaki Kanno
  2010-03-19  9:09         ` James Song
  0 siblings, 1 reply; 14+ messages in thread
From: Masaki Kanno @ 2010-03-19  3:14 UTC (permalink / raw)
  To: James (song wei), xen-devel

Hi James,

I tested the xm vcpu-pin command with xen-unstable changeset 21044. 
The command failed as follows.  Also, the Time(s) values reported by the 
xm vcpu-list command were strange: they were the same for all VCPUs.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     116.5 any cpu
Domain-0                             0     1     1   r--     116.5 any cpu
# xm vcpu-pin Domain-0 0 0
Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>

Set which CPUs a VCPU can use.
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     117.0 any cpu
Domain-0                             0     1     1   r--     117.0 any cpu


I reverted changesets 21044 and 21040, and then I tested the xm vcpu-pin 
command again.  The command succeeded as follows. 

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      60.8 any cpu
Domain-0                             0     1     1   -b-      42.8 any cpu
# xm vcpu-pin Domain-0 0 0
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      61.6 0
Domain-0                             0     1     1   -b-      43.2 any cpu

Best regards,
 Kan

Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:

>
>Keir, could you take a look at this issue?
>New patch for this issue:
>Signed-off-by: James (Song Wei) <jsong@novell.com>
>
>diff -r 8b269215464b tools/libxc/xc_domain.c
>--- a/tools/libxc/xc_domain.c	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/libxc/xc_domain.c	Thu Mar 18 11:37:55 2010 +0800
>@@ -105,23 +105,28 @@
> int xc_vcpu_setaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t cpumap)
>+                        uint64_t *cpumap, int cpusize)
> {
>     DECLARE_DOMCTL;
>     int ret = -1;
>-    uint8_t local[sizeof (cpumap)];
>+    uint8_t *local = malloc(cpusize); 
> 
>+    if(local == NULL)
>+    {
>+        PERROR("Could not alloc memory for Xen hypercall");
>+        goto out;
>+    }
>     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
>     domctl.domain = (domid_t)domid;
>     domctl.u.vcpuaffinity.vcpu    = vcpu;
> 
>-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
>+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
> 
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
> 
>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>     
>-    if ( lock_pages(local, sizeof(local)) != 0 )
>+    if ( lock_pages(local, cpusize) != 0 )
>     {
>         PERROR("Could not lock memory for Xen hypercall");
>         goto out;
>@@ -129,9 +134,10 @@
> 
>     ret = do_domctl(xc_handle, &domctl);
> 
>-    unlock_pages(local, sizeof(local));
>+    unlock_pages(local, cpusize);
> 
>  out:
>+    free(local);
>     return ret;
> }
> 
>@@ -139,18 +145,25 @@
> int xc_vcpu_getaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t *cpumap)
>+                        uint64_t *cpumap, int cpusize)
> {
>     DECLARE_DOMCTL;
>     int ret = -1;
>-    uint8_t local[sizeof (cpumap)];
>+    uint8_t * local = malloc(cpusize);
>+
>+    if(local == NULL)
>+    {
>+        PERROR("Could not alloc memory for Xen hypercall");
>+        goto out;
>+    }
> 
>     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
>     domctl.domain = (domid_t)domid;
>     domctl.u.vcpuaffinity.vcpu = vcpu;
> 
>+
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>     
>     if ( lock_pages(local, sizeof(local)) != 0 )
>     {
>@@ -161,8 +174,9 @@
>     ret = do_domctl(xc_handle, &domctl);
> 
>     unlock_pages(local, sizeof (local));
>-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
>- out:
>+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
>+out:
>+    free(local);
>     return ret;
> }
> 
>diff -r 8b269215464b tools/libxc/xenctrl.h
>--- a/tools/libxc/xenctrl.h	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/libxc/xenctrl.h	Thu Mar 18 11:37:55 2010 +0800
>@@ -310,11 +310,13 @@
> int xc_vcpu_setaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t cpumap);
>+                        uint64_t *cpumap,
>+                        int cpusize);
> int xc_vcpu_getaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t *cpumap);
>+                        uint64_t *cpumap,
>+                        int cpusize);
> 
> /**
>  * This function will return information about one or more domains. It is
>diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c
>--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Mar 18 11:37:55 2010 +0800
>@@ -217,8 +217,12 @@
> {
>     uint32_t dom;
>     int vcpu = 0, i;
>-    uint64_t  cpumap = ~0ULL;
>+    uint64_t  *cpumap;
>     PyObject *cpulist = NULL;
>+    int nr_cpus, size;
>+    xc_physinfo_t info; 
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(*cpumap); 
> 
>     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
> 
>@@ -226,26 +230,38 @@
>                                       &dom, &vcpu, &cpulist) )
>         return NULL;
> 
>+    set_xen_guest_handle(info.cpu_to_node, map);
>+    info.max_cpu_id = 1;
>+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
>+        return pyxc_error_to_exception();
>+  
>+    nr_cpus = info.nr_cpus;
>+
>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
>+    cpumap = malloc(cpumap_size * size);
>+    if(cpumap == NULL)
>+        return pyxc_error_to_exception();
>+
>     if ( (cpulist != NULL) && PyList_Check(cpulist) )
>     {
>-        cpumap = 0ULL;
>+        for ( i = 0; i < size; i++)
>+        {
>+            cpumap[i] = 0ULL;
>+        }
>         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
>         {
>             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
>-            if ( cpu >= 64 )
>-            {
>-                errno = EINVAL;
>-                PyErr_SetFromErrno(xc_error_obj);
>-                return NULL;
>-            }
>-            cpumap |= (uint64_t)1 << cpu;
>+            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
>         }
>     }
>   
>-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
>+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
>+    {
>+        free(cpumap);
>         return pyxc_error_to_exception();
>-    
>+    }
>     Py_INCREF(zero);
>+    free(cpumap); 
>     return zero;
> }
> 
>@@ -365,7 +381,11 @@
>     uint32_t dom, vcpu = 0;
>     xc_vcpuinfo_t info;
>     int rc, i;
>-    uint64_t cpumap;
>+    uint64_t *cpumap;
>+    int nr_cpus, size;
>+    xc_physinfo_t pinfo = { 0 };
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(*cpumap);
> 
>     static char *kwd_list[] = { "domid", "vcpu", NULL };
>     
>@@ -373,12 +393,25 @@
>                                       &dom, &vcpu) )
>         return NULL;
> 
>+    set_xen_guest_handle(pinfo.cpu_to_node, map);
>+    pinfo.max_cpu_id = 1;
>+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
>+        return pyxc_error_to_exception();
>+    nr_cpus = pinfo.nr_cpus;
>     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
>     if ( rc < 0 )
>         return pyxc_error_to_exception();
>-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
>+
>+    if((cpumap = malloc(cpumap_size * size)) == NULL)
>+        return pyxc_error_to_exception(); 
>+
>+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
>     if ( rc < 0 )
>+    {
>+        free(cpumap);
>         return pyxc_error_to_exception();
>+    }
> 
>     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
>                               "online",   info.online,
>@@ -386,19 +419,19 @@
>                               "running",  info.running,
>                               "cpu_time", info.cpu_time,
>                               "cpu",      info.cpu);
>-
>     cpulist = PyList_New(0);
>-    for ( i = 0; cpumap != 0; i++ )
>+    for ( i = 0; i < nr_cpus; i++ )
>     {
>-        if ( cpumap & 1 ) {
>+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
>             PyObject *pyint = PyInt_FromLong(i);
>             PyList_Append(cpulist, pyint);
>             Py_DECREF(pyint);
>         }
>-        cpumap >>= 1;
>+        cpumap[i / (cpumap_size * 8)] >>= 1;
>     }
>     PyDict_SetItemString(info_dict, "cpumap", cpulist);
>     Py_DECREF(cpulist);
>+    free(cpumap);
>     return info_dict;
> }
> 
>  http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
>adjust_vcpuaffinity_more_cpu.patch 
> 
>
>


* Re: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-19  3:14       ` Masaki Kanno
@ 2010-03-19  9:09         ` James Song
  2010-03-19  9:39           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) Jan Beulich
                             ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: James Song @ 2010-03-19  9:09 UTC (permalink / raw)
  To: Masaki Kanno, xen-devel



I updated the upstream code to c/s 21045. This patch works fine and I haven't hit this problem.
 
linux-2j72:/home # xm vcpu-pin  0 1 0
linux-2j72:/home # xm vcpu-list 0
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   ---      23.4 0
Domain-0                             0     1     0   r--      19.2 0 
 
 
B.T.W., something goes wrong when compiling c/s 21046! 
 
-James

>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>
Hi James,

I tested the xm vcpu-pin command with xen-unstable changeset 21044. 
The command failed as follows.  Also, the Time(s) values reported by the 
xm vcpu-list command were strange: they were the same for all VCPUs.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     116.5 any cpu
Domain-0                             0     1     1   r--     116.5 any cpu
# xm vcpu-pin Domain-0 0 0
Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>

Set which CPUs a VCPU can use.
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     117.0 any cpu
Domain-0                             0     1     1   r--     117.0 any cpu


I reverted changesets 21044 and 21040, and then I tested the xm vcpu-pin 
command again.  The command succeeded as follows. 

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      60.8 any cpu
Domain-0                             0     1     1   -b-      42.8 any cpu
# xm vcpu-pin Domain-0 0 0
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      61.6 0
Domain-0                             0     1     1   -b-      43.2 any cpu

Best regards,
Kan

Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:

>
>Keir, could you take a look at this issue?
>New patch for this issue:
>Signed-off-by: James (Song Wei) <jsong@novell.com>
>
>diff -r 8b269215464b tools/libxc/xc_domain.c
>--- a/tools/libxc/xc_domain.c	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/libxc/xc_domain.c	Thu Mar 18 11:37:55 2010 +0800
>@@ -105,23 +105,28 @@
> int xc_vcpu_setaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t cpumap)
>+                        uint64_t *cpumap, int cpusize)
> {
>     DECLARE_DOMCTL;
>     int ret = -1;
>-    uint8_t local[sizeof (cpumap)];
>+    uint8_t *local = malloc(cpusize); 
> 
>+    if(local == NULL)
>+    {
>+        PERROR("Could not alloc memory for Xen hypercall");
>+        goto out;
>+    }
>     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
>     domctl.domain = (domid_t)domid;
>     domctl.u.vcpuaffinity.vcpu    = vcpu;
> 
>-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
>+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
> 
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
> 
>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>     
>-    if ( lock_pages(local, sizeof(local)) != 0 )
>+    if ( lock_pages(local, cpusize) != 0 )
>     {
>         PERROR("Could not lock memory for Xen hypercall");
>         goto out;
>@@ -129,9 +134,10 @@
> 
>     ret = do_domctl(xc_handle, &domctl);
> 
>-    unlock_pages(local, sizeof(local));
>+    unlock_pages(local, cpusize);
> 
>  out:
>+    free(local);
>     return ret;
> }
> 
>@@ -139,18 +145,25 @@
> int xc_vcpu_getaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t *cpumap)
>+                        uint64_t *cpumap, int cpusize)
> {
>     DECLARE_DOMCTL;
>     int ret = -1;
>-    uint8_t local[sizeof (cpumap)];
>+    uint8_t * local = malloc(cpusize);
>+
>+    if(local == NULL)
>+    {
>+        PERROR("Could not alloc memory for Xen hypercall");
>+        goto out;
>+    }
> 
>     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
>     domctl.domain = (domid_t)domid;
>     domctl.u.vcpuaffinity.vcpu = vcpu;
> 
>+
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>     
>     if ( lock_pages(local, sizeof(local)) != 0 )
>     {
>@@ -161,8 +174,9 @@
>     ret = do_domctl(xc_handle, &domctl);
> 
>     unlock_pages(local, sizeof (local));
>-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
>- out:
>+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
>+out:
>+    free(local);
>     return ret;
> }
> 
>diff -r 8b269215464b tools/libxc/xenctrl.h
>--- a/tools/libxc/xenctrl.h	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/libxc/xenctrl.h	Thu Mar 18 11:37:55 2010 +0800
>@@ -310,11 +310,13 @@
> int xc_vcpu_setaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t cpumap);
>+                        uint64_t *cpumap,
>+                        int cpusize);
> int xc_vcpu_getaffinity(int xc_handle,
>                         uint32_t domid,
>                         int vcpu,
>-                        uint64_t *cpumap);
>+                        uint64_t *cpumap,
>+                        int cpusize);
> 
> /**
>  * This function will return information about one or more domains. It is
>diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c
>--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 10 14:01:32 2010 +0800
>+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Mar 18 11:37:55 2010 +0800
>@@ -217,8 +217,12 @@
> {
>     uint32_t dom;
>     int vcpu = 0, i;
>-    uint64_t  cpumap = ~0ULL;
>+    uint64_t  *cpumap;
>     PyObject *cpulist = NULL;
>+    int nr_cpus, size;
>+    xc_physinfo_t info; 
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(*cpumap); 
> 
>     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
> 
>@@ -226,26 +230,38 @@
>                                       &dom, &vcpu, &cpulist) )
>         return NULL;
> 
>+    set_xen_guest_handle(info.cpu_to_node, map);
>+    info.max_cpu_id = 1;
>+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
>+        return pyxc_error_to_exception();
>+  
>+    nr_cpus = info.nr_cpus;
>+
>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
>+    cpumap = malloc(cpumap_size * size);
>+    if(cpumap == NULL)
>+        return pyxc_error_to_exception();
>+
>     if ( (cpulist != NULL) && PyList_Check(cpulist) )
>     {
>-        cpumap = 0ULL;
>+        for ( i = 0; i < size; i++)
>+        {
>+            cpumap[i] = 0ULL;
>+        }
>         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
>         {
>             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
>-            if ( cpu >= 64 )
>-            {
>-                errno = EINVAL;
>-                PyErr_SetFromErrno(xc_error_obj);
>-                return NULL;
>-            }
>-            cpumap |= (uint64_t)1 << cpu;
>+            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
>         }
>     }
>   
>-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
>+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
>+    {
>+        free(cpumap);
>         return pyxc_error_to_exception();
>-    
>+    }
>     Py_INCREF(zero);
>+    free(cpumap); 
>     return zero;
> }
> 
>@@ -365,7 +381,11 @@
>     uint32_t dom, vcpu = 0;
>     xc_vcpuinfo_t info;
>     int rc, i;
>-    uint64_t cpumap;
>+    uint64_t *cpumap;
>+    int nr_cpus, size;
>+    xc_physinfo_t pinfo = { 0 };
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(*cpumap);
> 
>     static char *kwd_list[] = { "domid", "vcpu", NULL };
>     
>@@ -373,12 +393,25 @@
>                                       &dom, &vcpu) )
>         return NULL;
> 
>+    set_xen_guest_handle(pinfo.cpu_to_node, map);
>+    pinfo.max_cpu_id = 1;
>+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
>+        return pyxc_error_to_exception();
>+    nr_cpus = pinfo.nr_cpus;
>     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
>     if ( rc < 0 )
>         return pyxc_error_to_exception();
>-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
>+
>+    if((cpumap = malloc(cpumap_size * size)) == NULL)
>+        return pyxc_error_to_exception(); 
>+
>+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
>     if ( rc < 0 )
>+    {
>+        free(cpumap);
>         return pyxc_error_to_exception();
>+    }
> 
>     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
>                               "online",   info.online,
>@@ -386,19 +419,19 @@
>                               "running",  info.running,
>                               "cpu_time", info.cpu_time,
>                               "cpu",      info.cpu);
>-
>     cpulist = PyList_New(0);
>-    for ( i = 0; cpumap != 0; i++ )
>+    for ( i = 0; i < nr_cpus; i++ )
>     {
>-        if ( cpumap & 1 ) {
>+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
>             PyObject *pyint = PyInt_FromLong(i);
>             PyList_Append(cpulist, pyint);
>             Py_DECREF(pyint);
>         }
>-        cpumap >>= 1;
>+        cpumap[i / (cpumap_size * 8)] >>= 1;
>     }
>     PyDict_SetItemString(info_dict, "cpumap", cpulist);
>     Py_DECREF(cpulist);
>+    free(cpumap);
>     return info_dict;
> }
> 
>  http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
>adjust_vcpuaffinity_more_cpu.patch 
> 
>
>




* issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus)
  2010-03-19  9:09         ` James Song
@ 2010-03-19  9:39           ` Jan Beulich
  2010-03-19 10:28           ` [Patch] adjust the cpu-affinity to more than64 cpus Masaki Kanno
                             ` (3 subsequent siblings)
  4 siblings, 0 replies; 14+ messages in thread
From: Jan Beulich @ 2010-03-19  9:39 UTC (permalink / raw)
  To: James Song; +Cc: xen-devel

>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W., something goes wrong when compiling c/s 21046! 
 
Mind stating what it is that is wrong with this?

Jan


* Re: [Patch] adjust the cpu-affinity to more than64 cpus
  2010-03-19  9:09         ` James Song
  2010-03-19  9:39           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) Jan Beulich
@ 2010-03-19 10:28           ` Masaki Kanno
  2010-03-19 10:47           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) James Song
                             ` (2 subsequent siblings)
  4 siblings, 0 replies; 14+ messages in thread
From: Masaki Kanno @ 2010-03-19 10:28 UTC (permalink / raw)
  To: James Song, xen-devel

Hi James,

I tried changeset 21045, but I still hit the problem.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      31.9 any cpu
Domain-0                             0     1     0   r--      31.9 any cpu
# xm vcpu-pin Domain-0 0 0
Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>

Set which CPUs a VCPU can use.
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      32.7 any cpu
Domain-0                             0     1     0   r--      32.7 any cpu


FYI, I tried changeset 21045 on IPF(ia64) server too.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      17.8 any cpu
Domain-0                             0     1     0   r--      17.8 any cpu
Domain-0                             0     2     0   r--      17.8 any cpu
Domain-0                             0     3     0   r--      17.8 any cpu
# xm vcpu-pin Domain-0 0 0
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      18.0 0
Domain-0                             0     1     0   r--      18.0 0
Domain-0                             0     2     0   r--      18.0 0
Domain-0                             0     3     0   r--      18.0 0

Best regards,
 Kan

Fri, 19 Mar 2010 03:09:53 -0600, "James Song" wrote:

>I updated the upstream code to c/s 21045. This patch works fine and I 
>haven't hit this problem.
> 
>linux-2j72:/home # xm vcpu-pin  0 1 0
>linux-2j72:/home # xm vcpu-list 0
>Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
>Domain-0                             0     0     0   ---      23.4 0
>Domain-0                             0     1     0   r--      19.2 0 
> 
> 
>B.T.W., something goes wrong when compiling c/s 21046! 
> 
>-James
>
>>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>
>Hi James,
>
>I tested the xm vcpu-pin command with xen-unstable changeset 21044. 
>The command failed as follows.  Also, the Time(s) values reported by the 
>xm vcpu-list command were strange: they were the same for all VCPUs.
>
># xm vcpu-list
>Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
>Domain-0                             0     0     1   r--     116.5 any cpu
>Domain-0                             0     1     1   r--     116.5 any cpu
># xm vcpu-pin Domain-0 0 0
>Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
>Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>
>
>Set which CPUs a VCPU can use.
># xm vcpu-list
>Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
>Domain-0                             0     0     1   r--     117.0 any cpu
>Domain-0                             0     1     1   r--     117.0 any cpu
>
>
>I reverted changesets 21044 and 21040, and then I tested the xm vcpu-pin 
>command again.  The command succeeded as follows. 
>
># xm vcpu-list
>Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
>Domain-0                             0     0     0   r--      60.8 any cpu
>Domain-0                             0     1     1   -b-      42.8 any cpu
># xm vcpu-pin Domain-0 0 0
># xm vcpu-list
>Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
>Domain-0                             0     0     0   r--      61.6 0
>Domain-0                             0     1     1   -b-      43.2 any cpu
>
>Best regards,
>Kan
>
>Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:
>
>>
>>Keir, could you take a look at this issue?
>>New patch for this issue:
>>Signed-off-by: James (Song Wei) <jsong@novell.com>
>>
>>diff -r 8b269215464b tools/libxc/xc_domain.c
>>--- a/tools/libxc/xc_domain.c	Wed Mar 10 14:01:32 2010 +0800
>>+++ b/tools/libxc/xc_domain.c	Thu Mar 18 11:37:55 2010 +0800
>>@@ -105,23 +105,28 @@
>> int xc_vcpu_setaffinity(int xc_handle,
>>                         uint32_t domid,
>>                         int vcpu,
>>-                        uint64_t cpumap)
>>+                        uint64_t *cpumap, int cpusize)
>> {
>>     DECLARE_DOMCTL;
>>     int ret = -1;
>>-    uint8_t local[sizeof (cpumap)];
>>+    uint8_t *local = malloc(cpusize); 
>> 
>>+    if(local == NULL)
>>+    {
>>+        PERROR("Could not alloc memory for Xen hypercall");
>>+        goto out;
>>+    }
>>     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
>>     domctl.domain = (domid_t)domid;
>>     domctl.u.vcpuaffinity.vcpu    = vcpu;
>> 
>>-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
>>+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
>> 
>>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>> 
>>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>>     
>>-    if ( lock_pages(local, sizeof(local)) != 0 )
>>+    if ( lock_pages(local, cpusize) != 0 )
>>     {
>>         PERROR("Could not lock memory for Xen hypercall");
>>         goto out;
>>@@ -129,9 +134,10 @@
>> 
>>     ret = do_domctl(xc_handle, &domctl);
>> 
>>-    unlock_pages(local, sizeof(local));
>>+    unlock_pages(local, cpusize);
>> 
>>  out:
>>+    free(local);
>>     return ret;
>> }
>> 
>>@@ -139,18 +145,25 @@
>> int xc_vcpu_getaffinity(int xc_handle,
>>                         uint32_t domid,
>>                         int vcpu,
>>-                        uint64_t *cpumap)
>>+                        uint64_t *cpumap, int cpusize)
>> {
>>     DECLARE_DOMCTL;
>>     int ret = -1;
>>-    uint8_t local[sizeof (cpumap)];
>>+    uint8_t * local = malloc(cpusize);
>>+
>>+    if(local == NULL)
>>+    {
>>+        PERROR("Could not alloc memory for Xen hypercall");
>>+        goto out;
>>+    }
>> 
>>     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
>>     domctl.domain = (domid_t)domid;
>>     domctl.u.vcpuaffinity.vcpu = vcpu;
>> 
>>+
>>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>>-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
>>+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
>>     
>>     if ( lock_pages(local, sizeof(local)) != 0 )
>>     {
>>@@ -161,8 +174,9 @@
>>     ret = do_domctl(xc_handle, &domctl);
>> 
>>     unlock_pages(local, sizeof (local));
>>-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
>>- out:
>>+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
>>+out:
>>+    free(local);
>>     return ret;
>> }
>> 
>>diff -r 8b269215464b tools/libxc/xenctrl.h
>>--- a/tools/libxc/xenctrl.h	Wed Mar 10 14:01:32 2010 +0800
>>+++ b/tools/libxc/xenctrl.h	Thu Mar 18 11:37:55 2010 +0800
>>@@ -310,11 +310,13 @@
>> int xc_vcpu_setaffinity(int xc_handle,
>>                         uint32_t domid,
>>                         int vcpu,
>>-                        uint64_t cpumap);
>>+                        uint64_t *cpumap,
>>+                        int cpusize);
>> int xc_vcpu_getaffinity(int xc_handle,
>>                         uint32_t domid,
>>                         int vcpu,
>>-                        uint64_t *cpumap);
>>+                        uint64_t *cpumap,
>>+                        int cpusize);
>> 
>> /**
>>  * This function will return information about one or more domains. It is
>>diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c
>>--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 10 14:01:32 2010 +0800
>>+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Mar 18 11:37:55 2010 +0800
>>@@ -217,8 +217,12 @@
>> {
>>     uint32_t dom;
>>     int vcpu = 0, i;
>>-    uint64_t  cpumap = ~0ULL;
>>+    uint64_t  *cpumap;
>>     PyObject *cpulist = NULL;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t info; 
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(*cpumap); 
>> 
>>     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
>> 
>>@@ -226,26 +230,38 @@
>>                                       &dom, &vcpu, &cpulist) )
>>         return NULL;
>> 
>>+    set_xen_guest_handle(info.cpu_to_node, map);
>>+    info.max_cpu_id = 1;
>>+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
>>+        return pyxc_error_to_exception();
>>+  
>>+    nr_cpus = info.nr_cpus;
>>+
>>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
>>+    cpumap = malloc(cpumap_size * size);
>>+    if(cpumap == NULL)
>>+        return pyxc_error_to_exception();
>>+
>>     if ( (cpulist != NULL) && PyList_Check(cpulist) )
>>     {
>>-        cpumap = 0ULL;
>>+        for ( i = 0; i < size; i++)
>>+        {
>>+            cpumap[i] = 0ULL;
>>+        }
>>         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
>>         {
>>             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
>>-            if ( cpu >= 64 )
>>-            {
>>-                errno = EINVAL;
>>-                PyErr_SetFromErrno(xc_error_obj);
>>-                return NULL;
>>-            }
>>-            cpumap |= (uint64_t)1 << cpu;
>>+            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
>>         }
>>     }
>>   
>>-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
>>+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
>>+    {
>>+        free(cpumap);
>>         return pyxc_error_to_exception();
>>-    
>>+    }
>>     Py_INCREF(zero);
>>+    free(cpumap); 
>>     return zero;
>> }
>> 
>>@@ -365,7 +381,11 @@
>>     uint32_t dom, vcpu = 0;
>>     xc_vcpuinfo_t info;
>>     int rc, i;
>>-    uint64_t cpumap;
>>+    uint64_t *cpumap;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t pinfo = { 0 };
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(*cpumap);
>> 
>>     static char *kwd_list[] = { "domid", "vcpu", NULL };
>>     
>>@@ -373,12 +393,25 @@
>>                                       &dom, &vcpu) )
>>         return NULL;
>> 
>>+    set_xen_guest_handle(pinfo.cpu_to_node, map);
>>+    pinfo.max_cpu_id = 1;
>>+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
>>+        return pyxc_error_to_exception();
>>+    nr_cpus = pinfo.nr_cpus;
>>     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
>>     if ( rc < 0 )
>>         return pyxc_error_to_exception();
>>-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
>>+    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
>>+
>>+    if((cpumap = malloc(cpumap_size * size)) == NULL)
>>+        return pyxc_error_to_exception(); 
>>+
>>+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
>>     if ( rc < 0 )
>>+    {
>>+        free(cpumap);
>>         return pyxc_error_to_exception();
>>+    }
>> 
>>     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
>>                               "online",   info.online,
>>@@ -386,19 +419,19 @@
>>                               "running",  info.running,
>>                               "cpu_time", info.cpu_time,
>>                               "cpu",      info.cpu);
>>-
>>     cpulist = PyList_New(0);
>>-    for ( i = 0; cpumap != 0; i++ )
>>+    for ( i = 0; i < nr_cpus; i++ )
>>     {
>>-        if ( cpumap & 1 ) {
>>+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
>>             PyObject *pyint = PyInt_FromLong(i);
>>             PyList_Append(cpulist, pyint);
>>             Py_DECREF(pyint);
>>         }
>>-        cpumap >>= 1;
>>+        cpumap[i / (cpumap_size * 8)] >>= 1;
>>     }
>>     PyDict_SetItemString(info_dict, "cpumap", cpulist);
>>     Py_DECREF(cpulist);
>>+    free(cpumap);
>>     return info_dict;
>> }
>> 
>>  http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
>>adjust_vcpuaffinity_more_cpu.patch 
>> 
>>
>>


* issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus)
  2010-03-19  9:09         ` James Song
  2010-03-19  9:39           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) Jan Beulich
  2010-03-19 10:28           ` [Patch] adjust the cpu-affinity to more than64 cpus Masaki Kanno
@ 2010-03-19 10:47           ` James Song
  2010-03-19 11:10           ` Jan Beulich
  2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
  4 siblings, 0 replies; 14+ messages in thread
From: James Song @ 2010-03-19 10:47 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel



Sorry, I forgot to post the error message; here it is:
helper2.c: In function 'cpu_x86_init':
helper2.c:142: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c: In function 'sp_info':
helper2.c:219: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 6 has type 'uint32_t'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:226: error: 'ioreq_t' has no member named 'io_count'
helper2.c: In function '__cpu_get_ioreq':
helper2.c:235: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'
helper2.c: In function 'cpu_handle_ioreq':
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'

>>> Jan Beulich 2010-3-19 17:39 >>>
>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W., there is something wrong when compiling c/s 21046!

Mind stating what it is that is wrong with this?

Jan

[-- Attachment #1.2: Type: text/html, Size: 1998 bytes --]

[-- Attachment #2: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus)
  2010-03-19  9:09         ` James Song
                             ` (2 preceding siblings ...)
  2010-03-19 10:47           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) James Song
@ 2010-03-19 11:10           ` Jan Beulich
  2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
  4 siblings, 0 replies; 14+ messages in thread
From: Jan Beulich @ 2010-03-19 11:10 UTC (permalink / raw)
  To: James Song; +Cc: xen-devel

That's in the qemu tree, so it has nothing to do with c/s 21046. Jan

>>> James Song 19.03.10 11:47 >>>
Sorry, I forgot to post the error message. Here it is:
helper2.c: In function 'cpu_x86_init':
helper2.c:142: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c: In function 'sp_info':
helper2.c:219: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 6 has type 'uint32_t'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:226: error: 'ioreq_t' has no member named 'io_count'
helper2.c: In function '__cpu_get_ioreq':
helper2.c:235: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'
helper2.c: In function 'cpu_handle_ioreq':
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'

>>> Jan Beulich 2010-3-19 17:39 >>>
>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W., there is something wrong when compiling c/s 21046!

Mind stating what it is that is wrong with this?

Jan

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-19  9:09         ` James Song
                             ` (3 preceding siblings ...)
  2010-03-19 11:10           ` Jan Beulich
@ 2010-03-30 18:23           ` Liu, Jinsong
  2010-03-31  2:12             ` James Song
  2010-03-31  6:29             ` Masaki Kanno
  4 siblings, 2 replies; 14+ messages in thread
From: Liu, Jinsong @ 2010-03-30 18:23 UTC (permalink / raw)
  To: James Song, Masaki Kanno, xen-devel


[-- Attachment #1.1: Type: text/plain, Size: 11006 bytes --]

James and Masaki,

The issue Masaki reported does exist; it appears under ia32pae.
A patch (c/s 21087) fixes the bug.

Thanks,
Jinsong

________________________________
From: xen-devel-bounces@lists.xensource.com [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of James Song
Sent: Friday, March 19, 2010 5:10 PM
To: Masaki Kanno; xen-devel@lists.xensource.com
Subject: Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus

I updated the upstream code to c/s 20145. This patch works fine and I haven't met this problem.

linux-2j72:/home # xm vcpu-pin  0 1 0
linux-2j72:/home # xm vcpu-list 0
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   ---      23.4 0
Domain-0                             0     1     0   r--      19.2 0


B.T.W., there is something wrong when compiling c/s 21046!

-James

>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>
Hi James,

I tested xm vcpu-pin command with xen-unstable changeset 21044.
The command failed as follows.  Also Time(s) values by xm vcpu-list
command were strange.  The values were same in all VCPUs.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     116.5 any cpu
Domain-0                             0     1     1   r--     116.5 any cpu
# xm vcpu-pin Domain-0 0 0
Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>

Set which CPUs a VCPU can use.
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     117.0 any cpu
Domain-0                             0     1     1   r--     117.0 any cpu


I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin
command again.  The command succeeded as follows.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      60.8 any cpu
Domain-0                             0     1     1   -b-      42.8 any cpu
# xm vcpu-pin Domain-0 0 0
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      61.6 0
Domain-0                             0     1     1   -b-      43.2 any cpu

Best regards,
Kan


[-- Attachment #1.2: Type: text/html, Size: 24462 bytes --]

[-- Attachment #2: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread
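
As a usage sketch of the widened interface this thread introduces: callers now
pass a pointer to a dynamically sized map plus its length in bytes, rather
than a single uint64_t. The helper below is hypothetical (error handling
abbreviated) and only assumes the xc_vcpu_setaffinity() prototype from the
patch; it pins one vcpu of a domain to a single physical CPU.

    #include <stdint.h>
    #include <stdlib.h>
    #include "xenctrl.h"  /* patched prototype: uint64_t *cpumap, int cpusize */

    int pin_vcpu(int xc_handle, uint32_t domid, int vcpu,
                 int cpu, int nr_cpus)
    {
        int bits = sizeof(uint64_t) * 8;
        int size = (nr_cpus + bits - 1) / bits;   /* words needed */
        uint64_t *cpumap = calloc(size, sizeof(uint64_t));
        int rc;

        if ( cpumap == NULL )
            return -1;

        /* set the bit for the requested CPU */
        cpumap[cpu / bits] |= (uint64_t)1 << (cpu % bits);

        /* cpusize is a byte count, matching the new prototype */
        rc = xc_vcpu_setaffinity(xc_handle, domid, vcpu, cpumap,
                                 size * sizeof(uint64_t));
        free(cpumap);
        return rc;
    }

This mirrors what the setter in the patched tools/python/xen/lowlevel/xc/xc.c
does, minus the Python list handling.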

* RE: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
@ 2010-03-31  2:12             ` James Song
  2010-03-31  6:29             ` Masaki Kanno
  1 sibling, 0 replies; 14+ messages in thread
From: James Song @ 2010-03-31  2:12 UTC (permalink / raw)
  To: Jinsong Liu, Masaki Kanno, xen-devel


[-- Attachment #1.1: Type: text/plain, Size: 11208 bytes --]

Yeah, perhaps; I haven't tested this patch on an ia32pae box. Thanks for fixing it.
 
-Song Wei (James)

>>> "Liu, Jinsong" <jinsong.liu@intel.com> 2010-3-31 2:23 >>>
James and Masaki,
 
The issue Masaki reported does exist; it appears under ia32pae.
A patch (c/s 21087) fixes the bug.
 
Thanks,
Jinsong


[-- Attachment #1.2: Type: text/html, Size: 24039 bytes --]

[-- Attachment #2: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
  2010-03-31  2:12             ` James Song
@ 2010-03-31  6:29             ` Masaki Kanno
  2010-03-31  8:26               ` Liu, Jinsong
  1 sibling, 1 reply; 14+ messages in thread
From: Masaki Kanno @ 2010-03-31  6:29 UTC (permalink / raw)
  To: Liu, Jinsong, James Song, xen-devel

Hi Jinsong,

Thank you for fixing the issue.
I confirmed that your patch works on both ia32pae and ia64.

ia32pae:
 # xm vcpu-list
 Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
 Domain-0                             0     0     1   r--      33.9 any cpu
 Domain-0                             0     1     0   -b-      22.5 any cpu
 # xm vcpu-pin Domain-0 0 0
 # xm vcpu-list
 Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
 Domain-0                             0     0     0   -b-      34.4 0
 Domain-0                             0     1     1   r--      23.2 any cpu

ia64:
 # xm vcpu-list
 Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
 Domain-0                             0     0     0   r--      23.7 any cpu
 Domain-0                             0     1     1   -b-      10.6 any cpu
 Domain-0                             0     2     2   -b-       6.8 any cpu
 Domain-0                             0     3     3   -b-       9.7 any cpu
 # xm vcpu-pin Domain-0 0 0
 # xm vcpu-list
 Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
 Domain-0                             0     0     0   -b-      24.0 0
 Domain-0                             0     1     1   -b-      10.8 any cpu
 Domain-0                             0     2     2   r--       6.8 any cpu
 Domain-0                             0     3     3   -b-       9.8 any cpu

Best regards,
 Kan

Wed, 31 Mar 2010 02:23:47 +0800, "Liu, Jinsong" wrote:

>James and Masaki,
>
>The issue Masaki reported does exist; it appears under ia32pae.
>A patch (c/s 21087) fixes the bug.
>
>Thanks,
>Jinsong

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [Patch] adjust the cpu-affinity to more than 64 cpus
  2010-03-31  6:29             ` Masaki Kanno
@ 2010-03-31  8:26               ` Liu, Jinsong
  0 siblings, 0 replies; 14+ messages in thread
From: Liu, Jinsong @ 2010-03-31  8:26 UTC (permalink / raw)
  To: Masaki Kanno, James Song, xen-devel

Yeah, now CPU affinity and pinning work fine on all IA platforms, with no 64-CPU limit :)
Thanks to James for the original patch, and thanks to Masaki for confirming it!

Jinsong


^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2010-03-31  8:26 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-03-17  8:56 [Patch] adjust the cpu-affinity to more than 64 cpus James (song wei)
2010-03-17  9:25 ` Jan Beulich
2010-03-18  2:26   ` James (song wei)
2010-03-18  3:41     ` James (song wei)
2010-03-19  3:14       ` Masaki Kanno
2010-03-19  9:09         ` James Song
2010-03-19  9:39           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) Jan Beulich
2010-03-19 10:28           ` [Patch] adjust the cpu-affinity to more than 64 cpus Masaki Kanno
2010-03-19 10:47           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) James Song
2010-03-19 11:10           ` Jan Beulich
2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
2010-03-31  2:12             ` James Song
2010-03-31  6:29             ` Masaki Kanno
2010-03-31  8:26               ` Liu, Jinsong

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.