From: Haozhong Zhang <haozhong.zhang@intel.com>
To: xen-devel@lists.xen.org
Cc: Konrad Rzeszutek Wilk <konrad@darnok.org>,
	Dan Williams <dan.j.williams@intel.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Wei Liu <wei.liu2@citrix.com>,
	Haozhong Zhang <haozhong.zhang@intel.com>
Subject: [RFC XEN PATCH v2 14/15] tools/libxl: initiate pmem mapping via qmp callback
Date: Mon, 20 Mar 2017 08:09:48 +0800
Message-ID: <20170320000949.24675-15-haozhong.zhang@intel.com>
In-Reply-To: <20170320000949.24675-1-haozhong.zhang@intel.com>

Get the backend device, the guest SPA and the size of each vNVDIMM
device via the QMP commands "query-memory-devices" (with argument
devtype=nvdimm) and "qom-get", and pass them to libxl to map each
backend device into the guest.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>

Changes in v2:
 * Fail the domain creation if QMP initialization for NVDIMM fails.
   Other failures in QMP initialization still do not fail the domain
   creation, as in previous versions (see the sketch below).
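
The caller-visible contract of libxl__qmp_initializations() after this
patch is sketched below (a summary of this patch's intent, not a
general libxl error convention):

  /*
   * 0             - all QMP initializations succeeded
   * ERROR_FAIL    - a pre-existing QMP step failed; the caller keeps
   *                 ignoring such failures
   * ERROR_BADFAIL - NVDIMM mapping failed; domain creation is aborted
   */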
---
 tools/libxl/libxl_create.c |   4 +-
 tools/libxl/libxl_qmp.c    | 116 ++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index e741b9a39a..b8c867d0fa 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1510,7 +1510,9 @@ static void domcreate_devmodel_started(libxl__egc *egc,
     if (dcs->sdss.dm.guest_domid) {
         if (d_config->b_info.device_model_version
             == LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN) {
-            libxl__qmp_initializations(gc, domid, d_config);
+            ret = libxl__qmp_initializations(gc, domid, d_config);
+            if (ret == ERROR_BADFAIL)
+                goto error_out;
         }
     }
 
diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
index a91643a4f9..244d4bee5a 100644
--- a/tools/libxl/libxl_qmp.c
+++ b/tools/libxl/libxl_qmp.c
@@ -26,6 +26,7 @@
 
 #include "_libxl_list.h"
 #include "libxl_internal.h"
+#include "libxl_nvdimm.h"
 
 /* #define DEBUG_RECEIVED */
 
@@ -1146,6 +1147,111 @@ out:
     return rc;
 }
 
+static int qmp_nvdimm_get_mempath(libxl__qmp_handler *qmp,
+                                  const libxl__json_object *o,
+                                  void *opaque)
+{
+    const char **output = opaque;
+    const char *mem_path;
+    int rc = 0;
+    GC_INIT(qmp->ctx);
+
+    if (!o) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    mem_path = libxl__json_object_get_string(o);
+    if (!mem_path) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+    *output = libxl__strdup(NOGC, mem_path);
+
+ out:
+    GC_FREE;
+    return rc;
+}
+
+static int qmp_register_nvdimm_callback(libxl__qmp_handler *qmp,
+                                        const libxl__json_object *o,
+                                        void *unused)
+{
+    GC_INIT(qmp->ctx);
+    const libxl__json_object *obj, *sub_obj, *sub_map;
+    libxl__json_object *args = NULL;
+    unsigned int i = 0;
+    const char *mem_path = NULL, *memdev;
+    uint64_t slot, spa, size;
+    int rc = 0;
+
+    for (i = 0; (obj = libxl__json_array_get(o, i)); i++) {
+        if (!libxl__json_object_is_map(obj))
+            continue;
+
+        sub_map = libxl__json_map_get("data", obj, JSON_MAP);
+        if (!sub_map)
+            continue;
+
+        sub_obj = libxl__json_map_get("slot", sub_map, JSON_INTEGER);
+        slot = libxl__json_object_get_integer(sub_obj);
+
+        sub_obj = libxl__json_map_get("memdev", sub_map, JSON_STRING);
+        memdev = libxl__json_object_get_string(sub_obj);
+        if (!memdev) {
+            LOG(ERROR, "Cannot get backend memdev of NVDIMM #%" PRId64, slot);
+            rc = ERROR_FAIL;
+            goto out;
+        }
+        qmp_parameters_add_string(gc, &args, "path", memdev);
+        qmp_parameters_add_string(gc, &args, "property", "mem-path");
+        rc = qmp_synchronous_send(qmp, "qom-get", args, qmp_nvdimm_get_mempath,
+                                  &mem_path, qmp->timeout);
+        if (rc) {
+            LOG(ERROR, "Cannot get the backend device of NVDIMM #%" PRId64, slot);
+            goto out;
+        }
+
+        sub_obj = libxl__json_map_get("addr", sub_map, JSON_INTEGER);
+        spa = libxl__json_object_get_integer(sub_obj);
+
+        sub_obj = libxl__json_map_get("size", sub_map, JSON_INTEGER);
+        size = libxl__json_object_get_integer(sub_obj);
+
+        LOG(DEBUG,
+            "vNVDIMM #%" PRId64 ": %s, spa 0x%" PRIx64 ", size 0x%" PRIx64,
+            slot, mem_path, spa, size);
+
+        rc = libxl_nvdimm_add_device(gc, qmp->domid, mem_path, spa, size);
+        if (rc) {
+            LOG(ERROR,
+                "Failed to add NVDIMM #%" PRId64
+                "(mem_path %s, spa 0x%" PRIx64 ", size 0x%" PRIx64 ") "
+                "to domain %d (err = %d)",
+                slot, mem_path, spa, size, qmp->domid, rc);
+            goto out;
+        }
+    }
+
+ out:
+    GC_FREE;
+    return rc;
+}
+
+static int libxl__qmp_query_nvdimms(libxl__qmp_handler *qmp)
+{
+    libxl__json_object *args = NULL;
+    int rc;
+    GC_INIT(qmp->ctx);
+
+    qmp_parameters_add_string(gc, &args, "devtype", "nvdimm");
+    rc = qmp_synchronous_send(qmp, "query-memory-devices", args,
+                              qmp_register_nvdimm_callback, NULL, qmp->timeout);
+
+    GC_FREE;
+    return rc;
+}
+
 int libxl__qmp_hmp(libxl__gc *gc, int domid, const char *command_line,
                    char **output)
 {
@@ -1174,11 +1280,12 @@ int libxl__qmp_initializations(libxl__gc *gc, uint32_t domid,
 {
     const libxl_vnc_info *vnc = libxl__dm_vnc(guest_config);
     libxl__qmp_handler *qmp = NULL;
+    bool ignore_error = true;
     int ret = 0;
 
     qmp = libxl__qmp_initialize(gc, domid);
     if (!qmp)
-        return -1;
+        return ERROR_FAIL;
     ret = libxl__qmp_query_serial(qmp);
     if (!ret && vnc && vnc->passwd) {
         ret = qmp_change(gc, qmp, "vnc", "password", vnc->passwd);
@@ -1187,8 +1294,13 @@ int libxl__qmp_initializations(libxl__gc *gc, uint32_t domid,
     if (!ret) {
         ret = qmp_query_vnc(qmp);
     }
+    if (!ret && guest_config->num_vnvdimms) {
+        ret = libxl__qmp_query_nvdimms(qmp);
+        ignore_error = false;
+    }
     libxl__qmp_close(qmp);
-    return ret;
+
+    return ret ? (ignore_error ? ERROR_FAIL : ERROR_BADFAIL) : 0;
 }
 
 /*
-- 
2.12.0

