All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4 ]libxl: allow to set more than 31 vcpus
@ 2012-06-28  3:24 Zhang, Yang Z
  2012-06-28 16:52 ` Ian Jackson
  0 siblings, 1 reply; 2+ messages in thread
From: Zhang, Yang Z @ 2012-06-28  3:24 UTC (permalink / raw)
  To: xen-devel; +Cc: Ian Campbell

Change from v3:
Rebased on latest head
Add a cpu limit check; the maximum number of vcpus for a guest is 128.
According to Ian's comments, modified some code to make the logic more reasonable.
Except the following one:
> +        while (l-- > 0)
> +            libxl_cpumap_set((&b_info->avail_vcpus), l);
Ian: This while loop is == libxl_cpumap_set_any.
yang: No, it's different. libxl_cpumap_set_any() will set all bits with the granularity of a byte. This may be wrong when calling libxl_cpumap_count_set.

Change from v2:
Add function libxl_cpumap_to_hex_string to convert a cpumap to a hex string.
According to Ian's comments, modified some code to make the logic more reasonable.

In the current implementation, an integer is used to record the currently available cpus, which only allows the user to specify up to 31 vcpus.
The following patch uses a cpumap instead of an integer, which makes more sense than before. It also removes the limit on the maximum number of vcpus.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>

diff -r 079fce9e5557 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c        Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_create.c        Thu Jun 28 10:25:33 2012 +0800
@@ -22,6 +22,7 @@

 #include <xc_dom.h>
 #include <xenguest.h>
+#include <xen/hvm/hvm_info_table.h>

 void libxl_domain_config_init(libxl_domain_config *d_config)
 {
@@ -201,8 +202,12 @@ int libxl__domain_build_info_setdefault(

     if (!b_info->max_vcpus)
         b_info->max_vcpus = 1;
-    if (!b_info->cur_vcpus)
-        b_info->cur_vcpus = 1;
+    if (!b_info->avail_vcpus.size) {
+        if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1))
+            return ERROR_FAIL;
+        libxl_cpumap_set(&b_info->avail_vcpus, 0);
+    } else if(b_info->avail_vcpus.size > HVM_MAX_VCPUS)
+        return ERROR_FAIL;

     if (!b_info->cpumap.size) {
         if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0))
diff -r 079fce9e5557 tools/libxl/libxl_dm.c
--- a/tools/libxl/libxl_dm.c    Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_dm.c    Thu Jun 28 10:25:33 2012 +0800
@@ -160,6 +160,8 @@ static char ** libxl__build_device_model
     }
     if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) {
         int ioemu_vifs = 0;
+        int nr_set_cpus = 0;
+        char *s;

         if (b_info->u.hvm.serial) {
             flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL);
@@ -200,11 +202,13 @@ static char ** libxl__build_device_model
                               libxl__sprintf(gc, "%d", b_info->max_vcpus),
                               NULL);
         }
-        if (b_info->cur_vcpus) {
-            flexarray_vappend(dm_args, "-vcpu_avail",
-                              libxl__sprintf(gc, "0x%x", b_info->cur_vcpus),
-                              NULL);
-        }
+
+        nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+        s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus);
+        flexarray_vappend(dm_args, "-vcpu_avail",
+                              libxl__sprintf(gc, "%s", s), NULL);
+        free(s);
+
         for (i = 0; i < num_vifs; i++) {
             if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) {
                 char *smac = libxl__sprintf(gc,
@@ -443,11 +447,14 @@ static char ** libxl__build_device_model
         }
         if (b_info->max_vcpus > 1) {
             flexarray_append(dm_args, "-smp");
-            if (b_info->cur_vcpus)
+            if (b_info->avail_vcpus.size) {
+                int nr_set_cpus = 0;
+                nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d",
                                                          b_info->max_vcpus,
-                                                         b_info->cur_vcpus));
-            else
+                                                         nr_set_cpus));
+            } else
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d",
                                                          b_info->max_vcpus));
         }
diff -r 079fce9e5557 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c   Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_dom.c   Thu Jun 28 10:25:33 2012 +0800
@@ -199,8 +199,8 @@ int libxl__build_post(libxl__gc *gc, uin
     ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn);
     for (i = 0; i < info->max_vcpus; i++) {
         ents[12+(i*2)]   = libxl__sprintf(gc, "cpu/%d/availability", i);
-        ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i)))
-                            ? "offline" : "online";
+        ents[12+(i*2)+1] = libxl_cpumap_test(&info->avail_vcpus, i)
+                            ? "online" : "offline";
     }

     hvm_ents = NULL;
@@ -354,7 +354,7 @@ static int hvm_build_set_params(xc_inter
     va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
     va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic);
     va_hvm->nr_vcpus = info->max_vcpus;
-    memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus));
+    memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size);
     for (i = 0, sum = 0; i < va_hvm->length; i++)
         sum += ((uint8_t *) va_hvm)[i];
     va_hvm->checksum -= sum;
diff -r 079fce9e5557 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl       Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_types.idl       Thu Jun 28 10:25:33 2012 +0800
@@ -237,7 +237,7 @@ libxl_domain_sched_params = Struct("doma

 libxl_domain_build_info = Struct("domain_build_info",[
     ("max_vcpus",       integer),
-    ("cur_vcpus",       integer),
+    ("avail_vcpus",     libxl_cpumap),
     ("cpumap",          libxl_cpumap),
     ("tsc_mode",        libxl_tsc_mode),
     ("max_memkb",       MemKB),
diff -r 079fce9e5557 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_utils.c Thu Jun 28 10:25:33 2012 +0800
@@ -511,7 +511,7 @@ void libxl_cpumap_dispose(libxl_cpumap *
     free(map->map);
 }

-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return 0;
@@ -532,6 +532,31 @@ void libxl_cpumap_reset(libxl_cpumap *cp
     cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }

+int libxl_cpumap_count_set(const libxl_cpumap *cpumap)
+{
+    int i, nr_set_cpus = 0;
+    libxl_for_each_set_cpu(i, *cpumap)
+        nr_set_cpus++;
+
+    return nr_set_cpus;
+}
+
+/* NB. caller is responsible for freeing the memory */
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap)
+{
+    int i = cpumap->size;
+    char *p = libxl__zalloc(NULL, cpumap->size * 2 + 3);
+    char *q = p;
+    strncpy(p, "0x", 2);
+    p += 2;
+    while(--i >= 0) {
+        sprintf(p, "%02x", cpumap->map[i]);
+        p += 2;
+    }
+    *p = '\0';
+    return q;
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
diff -r 079fce9e5557 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/libxl_utils.h Thu Jun 28 10:25:33 2012 +0800
@@ -64,9 +64,11 @@ int libxl_vdev_to_device_disk(libxl_ctx
                                libxl_device_disk *disk);

 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus);
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap);
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap);
 static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
 {
     memset(cpumap->map, -1, cpumap->size);
diff -r 079fce9e5557 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Wed Jun 27 10:38:16 2012 +0800
+++ b/tools/libxl/xl_cmdimpl.c  Thu Jun 28 10:25:33 2012 +0800
@@ -647,7 +647,14 @@ static void parse_config_data(const char

     if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
         b_info->max_vcpus = l;
-        b_info->cur_vcpus = (1 << l) - 1;
+
+        if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) {
+            fprintf(stderr, "Unable to allocate cpumap\n");
+            exit(1);
+        }
+        libxl_cpumap_set_none(&b_info->avail_vcpus);
+        while (l-- > 0)
+            libxl_cpumap_set((&b_info->avail_vcpus), l);
     }

     if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))

^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH v4 ]libxl: allow to set more than 31 vcpus
  2012-06-28  3:24 [PATCH v4 ]libxl: allow to set more than 31 vcpus Zhang, Yang Z
@ 2012-06-28 16:52 ` Ian Jackson
  0 siblings, 0 replies; 2+ messages in thread
From: Ian Jackson @ 2012-06-28 16:52 UTC (permalink / raw)
  To: Zhang, Yang Z; +Cc: xen-devel, Ian Campbell

Zhang, Yang Z writes ("[Xen-devel] [PATCH v4 ]libxl: allow to set more than 31 vcpus"):
> In current implementation, it uses integer to record current avail cpus and this only allows user to specify 31 vcpus. 
> In following patch, it uses cpumap instead integer which make more sense than before. Also there is no limit to the max vcpus.
> 
> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>

Thanks,

Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>

I also fixed one trivial style error (a missing space).

Ian.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2012-06-28 16:52 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-06-28  3:24 [PATCH v4 ]libxl: allow to set more than 31 vcpus Zhang, Yang Z
2012-06-28 16:52 ` Ian Jackson

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.