From: Anthony PERARD <anthony.perard@citrix.com>
To: <xen-devel@lists.xenproject.org>
Cc: Anthony PERARD <anthony.perard@citrix.com>,
	Jason Andryuk <jandryuk@gmail.com>,
	Ian Jackson <iwj@xenproject.org>, Wei Liu <wl@xen.org>
Subject: [XEN PATCH v2 1/8] libxl: Replace deprecated QMP command by "query-cpus-fast"
Date: Tue, 11 May 2021 10:28:03 +0100	[thread overview]
Message-ID: <20210511092810.13759-2-anthony.perard@citrix.com> (raw)
In-Reply-To: <20210511092810.13759-1-anthony.perard@citrix.com>

We use the deprecated QMP command "query-cpus", which has been removed
in the QEMU 6.0 release. Its replacement, "query-cpus-fast", has been
available since QEMU 2.12 (April 2018).

This patch tries the new command first and, when it isn't available,
falls back to the deprecated one so that libxl still works with older
QEMU versions.
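
For context, the two commands return differently shaped arrays. The
exchange below is a rough sketch based on the QMP schema (fields other
than the per-CPU index are illustrative and vary by QEMU version):

    -> { "execute": "query-cpus-fast" }
    <- { "return": [ { "cpu-index": 0, "qom-path": "...", "thread-id": 1234 }, ... ] }

    -> { "execute": "query-cpus" }        (deprecated, removed in QEMU 6.0)
    <- { "return": [ { "CPU": 0, "qom_path": "...", "halted": false }, ... ] }

Only the per-CPU index ("cpu-index" vs "CPU") is needed here to rebuild
the bitmap of online vCPUs.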

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Reviewed-by: Jason Andryuk <jandryuk@gmail.com>
---

Notes:
    This is v2 of '[XEN PATCH for-4.15] libxl: Replace deprecated QMP
    command by "query-cpus-fast"' as the patch never made it into the
    release.
    
    changes:
    - introduce a fallback for when the new command isn't available.

 tools/libs/light/libxl_domain.c | 103 ++++++++++++++++++++++++++++++--
 1 file changed, 98 insertions(+), 5 deletions(-)

diff --git a/tools/libs/light/libxl_domain.c b/tools/libs/light/libxl_domain.c
index 5d4ec9071160..8c003aa7cb04 100644
--- a/tools/libs/light/libxl_domain.c
+++ b/tools/libs/light/libxl_domain.c
@@ -1740,6 +1740,35 @@ static int libxl__set_vcpuonline_xenstore(libxl__gc *gc, uint32_t domid,
     return rc;
 }
 
+static int qmp_parse_query_cpus_fast(libxl__gc *gc,
+                                     libxl_domid domid,
+                                     const libxl__json_object *response,
+                                     libxl_bitmap *const map)
+{
+    int i;
+    const libxl__json_object *cpu;
+
+    libxl_bitmap_set_none(map);
+    /* Parse response to QMP command "query-cpus-fast":
+     * [ { 'cpu-index': 'int',...} ]
+     */
+    for (i = 0; (cpu = libxl__json_array_get(response, i)); i++) {
+        unsigned int cpu_index;
+        const libxl__json_object *o;
+
+        o = libxl__json_map_get("cpu-index", cpu, JSON_INTEGER);
+        if (!o) {
+            LOGD(ERROR, domid, "Failed to retrieve CPU index.");
+            return ERROR_QEMU_API;
+        }
+
+        cpu_index = libxl__json_object_get_integer(o);
+        libxl_bitmap_set(map, cpu_index);
+    }
+
+    return 0;
+}
+
 static int qmp_parse_query_cpus(libxl__gc *gc,
                                 libxl_domid domid,
                                 const libxl__json_object *response,
@@ -1778,8 +1807,13 @@ typedef struct set_vcpuonline_state {
     int index; /* for loop on final_map */
 } set_vcpuonline_state;
 
+static void set_vcpuonline_qmp_cpus_fast_queried(libxl__egc *,
+    libxl__ev_qmp *, const libxl__json_object *, int rc);
 static void set_vcpuonline_qmp_cpus_queried(libxl__egc *,
     libxl__ev_qmp *, const libxl__json_object *, int rc);
+static void set_vcpuonline_qmp_query_cpus_parse(libxl__egc *,
+    libxl__ev_qmp *qmp, const libxl__json_object *,
+    bool query_cpus_fast, int rc);
 static void set_vcpuonline_qmp_add_cpu(libxl__egc *,
     libxl__ev_qmp *, const libxl__json_object *response, int rc);
 static void set_vcpuonline_timeout(libxl__egc *egc,
@@ -1840,8 +1874,8 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
                                              set_vcpuonline_timeout,
                                              LIBXL_QMP_CMD_TIMEOUT * 1000);
             if (rc) goto out;
-            qmp->callback = set_vcpuonline_qmp_cpus_queried;
-            rc = libxl__ev_qmp_send(egc, qmp, "query-cpus", NULL);
+            qmp->callback = set_vcpuonline_qmp_cpus_fast_queried;
+            rc = libxl__ev_qmp_send(egc, qmp, "query-cpus-fast", NULL);
             if (rc) goto out;
             return AO_INPROGRESS;
         default:
@@ -1860,11 +1894,39 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
     return AO_INPROGRESS;
 }
 
+static void set_vcpuonline_qmp_cpus_fast_queried(libxl__egc *egc,
+    libxl__ev_qmp *qmp, const libxl__json_object *response, int rc)
+{
+    EGC_GC;
+    set_vcpuonline_state *svos = CONTAINER_OF(qmp, *svos, qmp);
+
+    if (rc == ERROR_QMP_COMMAND_NOT_FOUND) {
+        /* Try again, we're probably talking to a QEMU older than 2.12 */
+        qmp->callback = set_vcpuonline_qmp_cpus_queried;
+        rc = libxl__ev_qmp_send(egc, qmp, "query-cpus", NULL);
+        if (rc) goto out;
+        return;
+    }
+
+out:
+    set_vcpuonline_qmp_query_cpus_parse(egc, qmp, response, true, rc);
+}
+
 static void set_vcpuonline_qmp_cpus_queried(libxl__egc *egc,
     libxl__ev_qmp *qmp, const libxl__json_object *response, int rc)
 {
     EGC_GC;
     set_vcpuonline_state *svos = CONTAINER_OF(qmp, *svos, qmp);
+
+    set_vcpuonline_qmp_query_cpus_parse(egc, qmp, response, false, rc);
+}
+
+static void set_vcpuonline_qmp_query_cpus_parse(libxl__egc *egc,
+    libxl__ev_qmp *qmp, const libxl__json_object *response,
+    bool query_cpus_fast, int rc)
+{
+    EGC_GC;
+    set_vcpuonline_state *svos = CONTAINER_OF(qmp, *svos, qmp);
     int i;
     libxl_bitmap current_map;
 
@@ -1876,7 +1938,11 @@ static void set_vcpuonline_qmp_cpus_queried(libxl__egc *egc,
     if (rc) goto out;
 
     libxl_bitmap_alloc(CTX, &current_map, svos->info.vcpu_max_id + 1);
-    rc = qmp_parse_query_cpus(gc, qmp->domid, response, &current_map);
+    if (query_cpus_fast) {
+        rc = qmp_parse_query_cpus_fast(gc, qmp->domid, response, &current_map);
+    } else {
+        rc = qmp_parse_query_cpus(gc, qmp->domid, response, &current_map);
+    }
     if (rc) goto out;
 
     libxl_bitmap_copy_alloc(CTX, final_map, svos->cpumap);
@@ -2121,6 +2187,9 @@ typedef struct {
 
 static void retrieve_domain_configuration_lock_acquired(
     libxl__egc *egc, libxl__ev_slowlock *, int rc);
+static void retrieve_domain_configuration_cpu_fast_queried(
+    libxl__egc *egc, libxl__ev_qmp *qmp,
+    const libxl__json_object *response, int rc);
 static void retrieve_domain_configuration_cpu_queried(
     libxl__egc *egc, libxl__ev_qmp *qmp,
     const libxl__json_object *response, int rc);
@@ -2198,8 +2267,8 @@ static void retrieve_domain_configuration_lock_acquired(
         if (rc) goto out;
         libxl_bitmap_alloc(CTX, &rdcs->qemuu_cpus,
                            d_config->b_info.max_vcpus);
-        rdcs->qmp.callback = retrieve_domain_configuration_cpu_queried;
-        rc = libxl__ev_qmp_send(egc, &rdcs->qmp, "query-cpus", NULL);
+        rdcs->qmp.callback = retrieve_domain_configuration_cpu_fast_queried;
+        rc = libxl__ev_qmp_send(egc, &rdcs->qmp, "query-cpus-fast", NULL);
         if (rc) goto out;
         has_callback = true;
     }
@@ -2210,6 +2279,30 @@ static void retrieve_domain_configuration_lock_acquired(
         retrieve_domain_configuration_end(egc, rdcs, rc);
 }
 
+static void retrieve_domain_configuration_cpu_fast_queried(
+    libxl__egc *egc, libxl__ev_qmp *qmp,
+    const libxl__json_object *response, int rc)
+{
+    EGC_GC;
+    retrieve_domain_configuration_state *rdcs =
+        CONTAINER_OF(qmp, *rdcs, qmp);
+
+    if (rc == ERROR_QMP_COMMAND_NOT_FOUND) {
+        /* Try again, we're probably talking to a QEMU older than 2.12 */
+        rdcs->qmp.callback = retrieve_domain_configuration_cpu_queried;
+        rc = libxl__ev_qmp_send(egc, &rdcs->qmp, "query-cpus", NULL);
+        if (rc) goto out;
+        return;
+    }
+
+    if (rc) goto out;
+
+    rc = qmp_parse_query_cpus_fast(gc, qmp->domid, response, &rdcs->qemuu_cpus);
+
+out:
+    retrieve_domain_configuration_end(egc, rdcs, rc);
+}
+
 static void retrieve_domain_configuration_cpu_queried(
     libxl__egc *egc, libxl__ev_qmp *qmp,
     const libxl__json_object *response, int rc)
-- 
Anthony PERARD



Thread overview: 11+ messages
2021-05-11  9:28 [XEN PATCH v2 0/8] Fix libxl with QEMU 6.0 + remove some more deprecated usages Anthony PERARD
2021-05-11  9:28 ` Anthony PERARD [this message]
2021-05-11  9:28 ` [XEN PATCH v2 2/8] libxl: Replace QEMU's command line short-form boolean option Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 3/8] libxl: Replace deprecated "cpu-add" QMP command by "device_add" Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 4/8] libxl: Use -device for cd-rom drives Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 5/8] libxl: Assert qmp_ev's state in qmp_ev_qemu_compare_version Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 6/8] libxl: Export libxl__qmp_ev_qemu_compare_version Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 7/8] libxl: Use `id` with the "eject" QMP command Anthony PERARD
2021-05-11  9:28 ` [XEN PATCH v2 8/8] libxl: Replace QMP command "change" by "blockdev-change-media" Anthony PERARD
2021-06-15 13:02 ` [XEN PATCH v2 0/8] Fix libxl with QEMU 6.0 + remove some more deprecated usages Julien Grall
2021-06-29  7:59 ` Olaf Hering
