* [Qemu-devel] [PATCH v4 1/4] QJSON: Add JSON writer
2015-01-22 14:01 [Qemu-devel] [PATCH v4 0/4] Migration Deciphering aid Alexander Graf
@ 2015-01-22 14:01 ` Alexander Graf
2015-01-22 14:01 ` [Qemu-devel] [PATCH v4 2/4] qemu-file: Add fast ftell code path Alexander Graf
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Alexander Graf @ 2015-01-22 14:01 UTC (permalink / raw)
To: qemu-devel; +Cc: quintela, amit.shah, pbonzini, alex.bennee, afaerber
To support programmatic JSON assembly while keeping the code that generates it
readable, this patch introduces a simple JSON writer. It emits JSON serially
into a buffer in memory.
The nice thing about this writer is its simplicity and low memory overhead.
Unlike the QMP JSON writer, this one does not need to spawn QObjects for every
element it wants to represent.
This is a prerequisite for the migration stream format description generator.
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v2 -> v3:
- QOMify the QJSON object, makes for easier destruction
v3 -> v4:
- Squash properly
- remove unused field from struct
---
Makefile.objs | 1 +
include/qjson.h | 29 +++++++++++++
qjson.c | 129 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 159 insertions(+)
create mode 100644 include/qjson.h
create mode 100644 qjson.c
diff --git a/Makefile.objs b/Makefile.objs
index abeb902..28999d3 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -51,6 +51,7 @@ common-obj-$(CONFIG_LINUX) += fsdev/
common-obj-y += migration/
common-obj-y += qemu-char.o #aio.o
common-obj-y += page_cache.o
+common-obj-y += qjson.o
common-obj-$(CONFIG_SPICE) += spice-qemu-char.o
diff --git a/include/qjson.h b/include/qjson.h
new file mode 100644
index 0000000..7c54fdf
--- /dev/null
+++ b/include/qjson.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU JSON writer
+ *
+ * Copyright Alexander Graf
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+#ifndef QEMU_QJSON_H
+#define QEMU_QJSON_H
+
+#define TYPE_QJSON "QJSON"
+typedef struct QJSON QJSON;
+
+QJSON *qjson_new(void);
+void json_prop_str(QJSON *json, const char *name, const char *str);
+void json_prop_int(QJSON *json, const char *name, int64_t val);
+void json_end_array(QJSON *json);
+void json_start_array(QJSON *json, const char *name);
+void json_end_object(QJSON *json);
+void json_start_object(QJSON *json, const char *name);
+const char *qjson_get_str(QJSON *json);
+void qjson_finish(QJSON *json);
+
+#endif /* QEMU_QJSON_H */
diff --git a/qjson.c b/qjson.c
new file mode 100644
index 0000000..b242222
--- /dev/null
+++ b/qjson.c
@@ -0,0 +1,129 @@
+/*
+ * QEMU JSON writer
+ *
+ * Copyright Alexander Graf
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <qapi/qmp/qstring.h>
+#include <stdbool.h>
+#include <glib.h>
+#include <qjson.h>
+#include <qemu/module.h>
+#include <qom/object.h>
+
+struct QJSON {
+ Object obj;
+ QString *str;
+ bool omit_comma;
+};
+
+static void json_emit_element(QJSON *json, const char *name)
+{
+ /* Check whether we need to print a , before an element */
+ if (json->omit_comma) {
+ json->omit_comma = false;
+ } else {
+ qstring_append(json->str, ", ");
+ }
+
+ if (name) {
+ qstring_append(json->str, "\"");
+ qstring_append(json->str, name);
+ qstring_append(json->str, "\" : ");
+ }
+}
+
+void json_start_object(QJSON *json, const char *name)
+{
+ json_emit_element(json, name);
+ qstring_append(json->str, "{ ");
+ json->omit_comma = true;
+}
+
+void json_end_object(QJSON *json)
+{
+ qstring_append(json->str, " }");
+ json->omit_comma = false;
+}
+
+void json_start_array(QJSON *json, const char *name)
+{
+ json_emit_element(json, name);
+ qstring_append(json->str, "[ ");
+ json->omit_comma = true;
+}
+
+void json_end_array(QJSON *json)
+{
+ qstring_append(json->str, " ]");
+ json->omit_comma = false;
+}
+
+void json_prop_int(QJSON *json, const char *name, int64_t val)
+{
+ json_emit_element(json, name);
+ qstring_append_int(json->str, val);
+}
+
+void json_prop_str(QJSON *json, const char *name, const char *str)
+{
+ json_emit_element(json, name);
+ qstring_append_chr(json->str, '"');
+ qstring_append(json->str, str);
+ qstring_append_chr(json->str, '"');
+}
+
+const char *qjson_get_str(QJSON *json)
+{
+ return qstring_get_str(json->str);
+}
+
+QJSON *qjson_new(void)
+{
+ QJSON *json = (QJSON *)object_new(TYPE_QJSON);
+ return json;
+}
+
+void qjson_finish(QJSON *json)
+{
+ json_end_object(json);
+}
+
+static void qjson_initfn(Object *obj)
+{
+ QJSON *json = (QJSON *)object_dynamic_cast(obj, TYPE_QJSON);
+ assert(json);
+
+ json->str = qstring_from_str("{ ");
+ json->omit_comma = true;
+}
+
+static void qjson_finalizefn(Object *obj)
+{
+ QJSON *json = (QJSON *)object_dynamic_cast(obj, TYPE_QJSON);
+
+ assert(json);
+ qobject_decref(QOBJECT(json->str));
+}
+
+static const TypeInfo qjson_type_info = {
+ .name = TYPE_QJSON,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(QJSON),
+ .instance_init = qjson_initfn,
+ .instance_finalize = qjson_finalizefn,
+};
+
+static void qjson_register_types(void)
+{
+ type_register_static(&qjson_type_info);
+}
+
+type_init(qjson_register_types)
--
1.7.12.4
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [Qemu-devel] [PATCH v4 3/4] migration: Append JSON description of migration stream
2015-01-22 14:01 [Qemu-devel] [PATCH v4 0/4] Migration Deciphering aid Alexander Graf
2015-01-22 14:01 ` [Qemu-devel] [PATCH v4 1/4] QJSON: Add JSON writer Alexander Graf
2015-01-22 14:01 ` [Qemu-devel] [PATCH v4 2/4] qemu-file: Add fast ftell code path Alexander Graf
@ 2015-01-22 14:01 ` Alexander Graf
2015-01-22 14:01 ` [Qemu-devel] [PATCH v4 4/4] Add migration stream analyzation script Alexander Graf
2015-02-04 10:24 ` [Qemu-devel] [PATCH v4 0/4] Migration Deciphering aid Amit Shah
4 siblings, 0 replies; 7+ messages in thread
From: Alexander Graf @ 2015-01-22 14:01 UTC (permalink / raw)
To: qemu-devel; +Cc: quintela, amit.shah, pbonzini, alex.bennee, afaerber
One of the annoyances of the current migration format is the fact that
it's not self-describing. In fact, it's not properly describing at all.
Some code randomly scattered throughout QEMU elaborates roughly how to
read and write a stream of bytes.
We discussed an idea during KVM Forum 2013 to add a JSON description of
the migration protocol itself to the migration stream. This patch
adds a section after the VM_END migration end marker that contains
description data on what the device sections of the stream are composed of.
This approach is backwards compatible with any QEMU version reading the
stream, because QEMU just stops reading after the VM_END marker and ignores
any data following it.
With an additional external program this allows us to decipher the
contents of any migration stream and hopefully make migration bugs easier
to track down.
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v2 -> v3:
- Don't compress objects with subsections
- Destroy QJSON object
v3 -> v4:
- Update patch description
---
hw/pci/pci.c | 2 +-
hw/scsi/spapr_vscsi.c | 2 +-
hw/virtio/virtio.c | 2 +-
include/migration/migration.h | 1 +
include/migration/vmstate.h | 3 +-
migration/vmstate.c | 186 ++++++++++++++++++++++++++++++++++++++++--
savevm.c | 54 ++++++++++--
tests/test-vmstate.c | 6 +-
8 files changed, 237 insertions(+), 19 deletions(-)
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 371699c..38d0b7b 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -512,7 +512,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
* This makes us compatible with old devices
* which never set or clear this bit. */
s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
- vmstate_save_state(f, pci_get_vmstate(s), s);
+ vmstate_save_state(f, pci_get_vmstate(s), s, NULL);
/* Restore the interrupt status bit. */
pci_update_irq_status(s);
}
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 20b20f0..3639235 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -630,7 +630,7 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
vscsi_req *req = sreq->hba_private;
assert(req->active);
- vmstate_save_state(f, &vmstate_spapr_vscsi_req, req);
+ vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);
DPRINTF("VSCSI: saving tag=%u, current desc#%d, offset=%x\n",
req->qtag, req->cur_desc_num, req->cur_desc_offset);
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 013979a..d735343 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -955,7 +955,7 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
}
/* Subsections */
- vmstate_save_state(f, &vmstate_virtio, vdev);
+ vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
int virtio_set_features(VirtIODevice *vdev, uint32_t val)
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 3cb5ba8..f37348b 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -33,6 +33,7 @@
#define QEMU_VM_SECTION_END 0x03
#define QEMU_VM_SECTION_FULL 0x04
#define QEMU_VM_SUBSECTION 0x05
+#define QEMU_VM_VMDESCRIPTION 0x06
struct MigrationParams {
bool blk;
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index d712a65..42cc27e 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -29,6 +29,7 @@
#ifndef CONFIG_USER_ONLY
#include <migration/qemu-file.h>
#endif
+#include <qjson.h>
typedef void SaveStateHandler(QEMUFile *f, void *opaque);
typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
@@ -779,7 +780,7 @@ extern const VMStateInfo vmstate_info_bitmap;
int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id);
void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque);
+ void *opaque, QJSON *vmdesc);
int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
const VMStateDescription *vmsd,
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 3dde574..2a91f1f 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -4,9 +4,10 @@
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "trace.h"
+#include "qjson.h"
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque);
+ void *opaque, QJSON *vmdesc);
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
@@ -138,32 +139,181 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
return 0;
}
+static int vmfield_name_num(VMStateField *start, VMStateField *search)
+{
+ VMStateField *field;
+ int found = 0;
+
+ for (field = start; field->name; field++) {
+ if (!strcmp(field->name, search->name)) {
+ if (field == search) {
+ return found;
+ }
+ found++;
+ }
+ }
+
+ return -1;
+}
+
+static bool vmfield_name_is_unique(VMStateField *start, VMStateField *search)
+{
+ VMStateField *field;
+ int found = 0;
+
+ for (field = start; field->name; field++) {
+ if (!strcmp(field->name, search->name)) {
+ found++;
+ /* name found more than once, so it's not unique */
+ if (found > 1) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static const char *vmfield_get_type_name(VMStateField *field)
+{
+ const char *type = "unknown";
+
+ if (field->flags & VMS_STRUCT) {
+ type = "struct";
+ } else if (field->info->name) {
+ type = field->info->name;
+ }
+
+ return type;
+}
+
+static bool vmsd_can_compress(VMStateField *field)
+{
+ if (field->field_exists) {
+ /* Dynamically existing fields mess up compression */
+ return false;
+ }
+
+ if (field->flags & VMS_STRUCT) {
+ VMStateField *sfield = field->vmsd->fields;
+ while (sfield->name) {
+ if (!vmsd_can_compress(sfield)) {
+ /* Child elements can't compress, so can't we */
+ return false;
+ }
+ sfield++;
+ }
+
+ if (field->vmsd->subsections) {
+ /* Subsections may come and go, better don't compress */
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void vmsd_desc_field_start(const VMStateDescription *vmsd, QJSON *vmdesc,
+ VMStateField *field, int i, int max)
+{
+ char *name, *old_name;
+ bool is_array = max > 1;
+ bool can_compress = vmsd_can_compress(field);
+
+ if (!vmdesc) {
+ return;
+ }
+
+ name = g_strdup(field->name);
+
+ /* Field name is not unique, need to make it unique */
+ if (!vmfield_name_is_unique(vmsd->fields, field)) {
+ int num = vmfield_name_num(vmsd->fields, field);
+ old_name = name;
+ name = g_strdup_printf("%s[%d]", name, num);
+ g_free(old_name);
+ }
+
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", name);
+ if (is_array) {
+ if (can_compress) {
+ json_prop_int(vmdesc, "array_len", max);
+ } else {
+ json_prop_int(vmdesc, "index", i);
+ }
+ }
+ json_prop_str(vmdesc, "type", vmfield_get_type_name(field));
+
+ if (field->flags & VMS_STRUCT) {
+ json_start_object(vmdesc, "struct");
+ }
+
+ g_free(name);
+}
+
+static void vmsd_desc_field_end(const VMStateDescription *vmsd, QJSON *vmdesc,
+ VMStateField *field, size_t size, int i)
+{
+ if (!vmdesc) {
+ return;
+ }
+
+ if (field->flags & VMS_STRUCT) {
+ /* We printed a struct in between, close its child object */
+ json_end_object(vmdesc);
+ }
+
+ json_prop_int(vmdesc, "size", size);
+ json_end_object(vmdesc);
+}
+
void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque)
+ void *opaque, QJSON *vmdesc)
{
VMStateField *field = vmsd->fields;
if (vmsd->pre_save) {
vmsd->pre_save(opaque);
}
+
+ if (vmdesc) {
+ json_prop_str(vmdesc, "vmsd_name", vmsd->name);
+ json_prop_int(vmdesc, "version", vmsd->version_id);
+ json_start_array(vmdesc, "fields");
+ }
+
while (field->name) {
if (!field->field_exists ||
field->field_exists(opaque, vmsd->version_id)) {
void *base_addr = vmstate_base_addr(opaque, field, false);
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
+ int64_t old_offset, written_bytes;
+ QJSON *vmdesc_loop = vmdesc;
for (i = 0; i < n_elems; i++) {
void *addr = base_addr + size * i;
+ vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems);
+ old_offset = qemu_ftell_fast(f);
+
if (field->flags & VMS_ARRAY_OF_POINTER) {
addr = *(void **)addr;
}
if (field->flags & VMS_STRUCT) {
- vmstate_save_state(f, field->vmsd, addr);
+ vmstate_save_state(f, field->vmsd, addr, vmdesc_loop);
} else {
field->info->put(f, addr, size);
}
+
+ written_bytes = qemu_ftell_fast(f) - old_offset;
+ vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i);
+
+ /* Compressed arrays only care about the first element */
+ if (vmdesc_loop && vmsd_can_compress(field)) {
+ vmdesc_loop = NULL;
+ }
}
} else {
if (field->flags & VMS_MUST_EXIST) {
@@ -174,7 +324,12 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
}
field++;
}
- vmstate_subsection_save(f, vmsd, opaque);
+
+ if (vmdesc) {
+ json_end_array(vmdesc);
+ }
+
+ vmstate_subsection_save(f, vmsd, opaque, vmdesc);
}
static const VMStateDescription *
@@ -231,24 +386,43 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
}
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque)
+ void *opaque, QJSON *vmdesc)
{
const VMStateSubsection *sub = vmsd->subsections;
+ bool subsection_found = false;
while (sub && sub->needed) {
if (sub->needed(opaque)) {
const VMStateDescription *vmsd = sub->vmsd;
uint8_t len;
+ if (vmdesc) {
+ /* Only create subsection array when we have any */
+ if (!subsection_found) {
+ json_start_array(vmdesc, "subsections");
+ subsection_found = true;
+ }
+
+ json_start_object(vmdesc, NULL);
+ }
+
qemu_put_byte(f, QEMU_VM_SUBSECTION);
len = strlen(vmsd->name);
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)vmsd->name, len);
qemu_put_be32(f, vmsd->version_id);
- vmstate_save_state(f, vmsd, opaque);
+ vmstate_save_state(f, vmsd, opaque, vmdesc);
+
+ if (vmdesc) {
+ json_end_object(vmdesc);
+ }
}
sub++;
}
+
+ if (vmdesc && subsection_found) {
+ json_end_array(vmdesc);
+ }
}
/* bool */
diff --git a/savevm.c b/savevm.c
index 08ec678..48e3e5c 100644
--- a/savevm.c
+++ b/savevm.c
@@ -572,14 +572,34 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
}
-static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
+static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
+{
+ int64_t old_offset, size;
+
+ old_offset = qemu_ftell_fast(f);
+ se->ops->save_state(f, se->opaque);
+ size = qemu_ftell_fast(f) - old_offset;
+
+ if (vmdesc) {
+ json_prop_int(vmdesc, "size", size);
+ json_start_array(vmdesc, "fields");
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", "data");
+ json_prop_int(vmdesc, "size", size);
+ json_prop_str(vmdesc, "type", "buffer");
+ json_end_object(vmdesc);
+ json_end_array(vmdesc);
+ }
+}
+
+static void vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
{
trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
- if (!se->vmsd) { /* Old style */
- se->ops->save_state(f, se->opaque);
+ if (!se->vmsd) {
+ vmstate_save_old_style(f, se, vmdesc);
return;
}
- vmstate_save_state(f, se->vmsd, se->opaque);
+ vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
}
bool qemu_savevm_state_blocked(Error **errp)
@@ -692,6 +712,8 @@ int qemu_savevm_state_iterate(QEMUFile *f)
void qemu_savevm_state_complete(QEMUFile *f)
{
+ QJSON *vmdesc;
+ int vmdesc_len;
SaveStateEntry *se;
int ret;
@@ -721,6 +743,9 @@ void qemu_savevm_state_complete(QEMUFile *f)
}
}
+ vmdesc = qjson_new();
+ json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE);
+ json_start_array(vmdesc, "devices");
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
int len;
@@ -728,6 +753,11 @@ void qemu_savevm_state_complete(QEMUFile *f)
continue;
}
trace_savevm_section_start(se->idstr, se->section_id);
+
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", se->idstr);
+ json_prop_int(vmdesc, "instance_id", se->instance_id);
+
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_FULL);
qemu_put_be32(f, se->section_id);
@@ -740,11 +770,23 @@ void qemu_savevm_state_complete(QEMUFile *f)
qemu_put_be32(f, se->instance_id);
qemu_put_be32(f, se->version_id);
- vmstate_save(f, se);
+ vmstate_save(f, se, vmdesc);
+
+ json_end_object(vmdesc);
trace_savevm_section_end(se->idstr, se->section_id);
}
qemu_put_byte(f, QEMU_VM_EOF);
+
+ json_end_array(vmdesc);
+ qjson_finish(vmdesc);
+ vmdesc_len = strlen(qjson_get_str(vmdesc));
+
+ qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
+ qemu_put_be32(f, vmdesc_len);
+ qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
+ object_unref(OBJECT(vmdesc));
+
qemu_fflush(f);
}
@@ -843,7 +885,7 @@ static int qemu_save_device_state(QEMUFile *f)
qemu_put_be32(f, se->instance_id);
qemu_put_be32(f, se->version_id);
- vmstate_save(f, se);
+ vmstate_save(f, se, NULL);
}
qemu_put_byte(f, QEMU_VM_EOF);
diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c
index 39b7b01..1d620e0 100644
--- a/tests/test-vmstate.c
+++ b/tests/test-vmstate.c
@@ -85,7 +85,7 @@ static void save_vmstate(const VMStateDescription *desc, void *obj)
QEMUFile *f = open_test_file(true);
/* Save file with vmstate */
- vmstate_save_state(f, desc, obj);
+ vmstate_save_state(f, desc, obj, NULL);
qemu_put_byte(f, QEMU_VM_EOF);
g_assert(!qemu_file_get_error(f));
qemu_fclose(f);
@@ -394,7 +394,7 @@ static void test_save_noskip(void)
QEMUFile *fsave = qemu_bufopen("w", NULL);
TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6,
.skip_c_e = false };
- vmstate_save_state(fsave, &vmstate_skipping, &obj);
+ vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL);
g_assert(!qemu_file_get_error(fsave));
uint8_t expected[] = {
@@ -414,7 +414,7 @@ static void test_save_skip(void)
QEMUFile *fsave = qemu_bufopen("w", NULL);
TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6,
.skip_c_e = true };
- vmstate_save_state(fsave, &vmstate_skipping, &obj);
+ vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL);
g_assert(!qemu_file_get_error(fsave));
uint8_t expected[] = {
--
1.7.12.4
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [Qemu-devel] [PATCH v4 4/4] Add migration stream analyzation script
2015-01-22 14:01 [Qemu-devel] [PATCH v4 0/4] Migration Deciphering aid Alexander Graf
` (2 preceding siblings ...)
2015-01-22 14:01 ` [Qemu-devel] [PATCH v4 3/4] migration: Append JSON description of migration stream Alexander Graf
@ 2015-01-22 14:01 ` Alexander Graf
2015-02-04 9:35 ` Amit Shah
2015-02-04 10:24 ` [Qemu-devel] [PATCH v4 0/4] Migration Deciphering aid Amit Shah
4 siblings, 1 reply; 7+ messages in thread
From: Alexander Graf @ 2015-01-22 14:01 UTC (permalink / raw)
To: qemu-devel; +Cc: quintela, amit.shah, pbonzini, alex.bennee, afaerber
This patch adds a python tool to the scripts directory that can read
a dumped migration stream if it contains the JSON description of the
device states. It constructs a human readable JSON stream out of it.
It's very simple to use:
$ qemu-system-x86_64
(qemu) migrate "exec:cat > mig"
$ ./scripts/analyze_migration.py -f mig
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v1 -> v2:
- Remove support for multiple vmsd versions
- Add support for HTAB
- Add support for array_len
- Move to new naming schema for fields
- Use dynamic page size
v2 -> v3:
- Add tell function to MigrationFile
- Report where subsections were not found
- Print ram sections with size
- Remove foreign desc file
- Add memory dump support (-m option)
- Add -x option to extract all sections into files
v3 -> v4:
- Change copyright date to 2015
---
scripts/analyze-migration.py | 592 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 592 insertions(+)
create mode 100755 scripts/analyze-migration.py
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
new file mode 100755
index 0000000..b8b9968
--- /dev/null
+++ b/scripts/analyze-migration.py
@@ -0,0 +1,592 @@
+#!/usr/bin/env python
+#
+# Migration Stream Analyzer
+#
+# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+import numpy as np
+import json
+import os
+import argparse
+import collections
+import pprint
+
+def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError:
+ pass
+
+class MigrationFile(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self.file = open(self.filename, "rb")
+
+ def read64(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])
+
+ def read32(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])
+
+ def read16(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])
+
+ def read8(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])
+
+ def readstr(self, len = None):
+ if len is None:
+ len = self.read8()
+ if len == 0:
+ return ""
+ return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]
+
+ def readvar(self, size = None):
+ if size is None:
+ size = self.read8()
+ if size == 0:
+ return ""
+ value = self.file.read(size)
+ if len(value) != size:
+ raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
+ return value
+
+ def tell(self):
+ return self.file.tell()
+
+ # The VMSD description is at the end of the file, after EOF. Look for
+ # the last NULL byte, then for the beginning brace of JSON.
+ def read_migration_debug_json(self):
+ QEMU_VM_VMDESCRIPTION = 0x06
+
+ # Remember the offset in the file when we started
+ entrypos = self.file.tell()
+
+ # Read the last 10MB
+ self.file.seek(0, os.SEEK_END)
+ endpos = self.file.tell()
+ self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
+ datapos = self.file.tell()
+ data = self.file.read()
+ # The full file read closed the file as well, reopen it
+ self.file = open(self.filename, "rb")
+
+ # Find the last NULL byte, then the first brace after that. This should
+ # be the beginning of our JSON data.
+ nulpos = data.rfind("\0")
+ jsonpos = data.find("{", nulpos)
+
+ # Check backwards from there and see whether we guessed right
+ self.file.seek(datapos + jsonpos - 5, 0)
+ if self.read8() != QEMU_VM_VMDESCRIPTION:
+ raise Exception("No Debug Migration device found")
+
+ jsonlen = self.read32()
+
+ # Seek back to where we were at the beginning
+ self.file.seek(entrypos, 0)
+
+ return data[jsonpos:jsonpos + jsonlen]
+
+ def close(self):
+ self.file.close()
+
+class RamSection(object):
+ RAM_SAVE_FLAG_COMPRESS = 0x02
+ RAM_SAVE_FLAG_MEM_SIZE = 0x04
+ RAM_SAVE_FLAG_PAGE = 0x08
+ RAM_SAVE_FLAG_EOS = 0x10
+ RAM_SAVE_FLAG_CONTINUE = 0x20
+ RAM_SAVE_FLAG_XBZRLE = 0x40
+ RAM_SAVE_FLAG_HOOK = 0x80
+
+ def __init__(self, file, version_id, ramargs, section_key):
+ if version_id != 4:
+ raise Exception("Unknown RAM version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+ self.TARGET_PAGE_SIZE = ramargs['page_size']
+ self.dump_memory = ramargs['dump_memory']
+ self.write_memory = ramargs['write_memory']
+ self.sizeinfo = collections.OrderedDict()
+ self.data = collections.OrderedDict()
+ self.data['section sizes'] = self.sizeinfo
+ self.name = ''
+ if self.write_memory:
+ self.files = { }
+ if self.dump_memory:
+ self.memory = collections.OrderedDict()
+ self.data['memory'] = self.memory
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ # Read all RAM sections
+ while True:
+ addr = self.file.read64()
+ flags = addr & (self.TARGET_PAGE_SIZE - 1)
+ addr &= ~(self.TARGET_PAGE_SIZE - 1)
+
+ if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
+ while True:
+ namelen = self.file.read8()
+ # We assume that no RAM chunk is big enough to ever
+ # hit the first byte of the address, so when we see
+ # a zero here we know it has to be an address, not the
+ # length of the next block.
+ if namelen == 0:
+ self.file.file.seek(-1, 1)
+ break
+ self.name = self.file.readstr(len = namelen)
+ len = self.file.read64()
+ self.sizeinfo[self.name] = '0x%016x' % len
+ if self.write_memory:
+ print self.name
+ mkdir_p('./' + os.path.dirname(self.name))
+ f = open('./' + self.name, "wb")
+ f.truncate(0)
+ f.truncate(len)
+ self.files[self.name] = f
+ flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE
+
+ if flags & self.RAM_SAVE_FLAG_COMPRESS:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+ fill_char = self.file.read8()
+ # The page in question is filled with fill_char now
+ if self.write_memory and fill_char != 0:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
+ if self.dump_memory:
+ self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
+ flags &= ~self.RAM_SAVE_FLAG_COMPRESS
+ elif flags & self.RAM_SAVE_FLAG_PAGE:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+
+ if self.write_memory or self.dump_memory:
+ data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
+ else: # Just skip RAM data
+ self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
+
+ if self.write_memory:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(data)
+ if self.dump_memory:
+ hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
+ self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata
+
+ flags &= ~self.RAM_SAVE_FLAG_PAGE
+ elif flags & self.RAM_SAVE_FLAG_XBZRLE:
+ raise Exception("XBZRLE RAM compression is not supported yet")
+ elif flags & self.RAM_SAVE_FLAG_HOOK:
+ raise Exception("RAM hooks don't make sense with files")
+
+ # End of RAM section
+ if flags & self.RAM_SAVE_FLAG_EOS:
+ break
+
+ if flags != 0:
+ raise Exception("Unknown RAM flags: %x" % flags)
+
+ def __del__(self):
+ if self.write_memory:
+ for key in self.files:
+ self.files[key].close()
+
+
+class HTABSection(object):
+ HASH_PTE_SIZE_64 = 16
+
+ def __init__(self, file, version_id, device, section_key):
+ if version_id != 1:
+ raise Exception("Unknown HTAB version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+
+ def read(self):
+
+ header = self.file.read32()
+
+ if (header > 0):
+ # First section, just the hash shift
+ return
+
+ # Read until end marker
+ while True:
+ index = self.file.read32()
+ n_valid = self.file.read16()
+ n_invalid = self.file.read16()
+
+ if index == 0 and n_valid == 0 and n_invalid == 0:
+ break
+
+ self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)
+
+ def getDict(self):
+ return ""
+
+class VMSDFieldGeneric(object):
+ def __init__(self, desc, file):
+ self.file = file
+ self.desc = desc
+ self.data = ""
+
+ def __repr__(self):
+ return str(self.__str__())
+
+ def __str__(self):
+ return " ".join("{0:02x}".format(ord(c)) for c in self.data)
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ size = int(self.desc['size'])
+ self.data = self.file.readvar(size)
+ return self.data
+
+class VMSDFieldInt(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldInt, self).__init__(desc, file)
+ self.size = int(desc['size'])
+ self.format = '0x%%0%dx' % (self.size * 2)
+ self.sdtype = '>i%d' % self.size
+ self.udtype = '>u%d' % self.size
+
+ def __repr__(self):
+ if self.data < 0:
+ return ('%s (%d)' % ((self.format % self.udata), self.data))
+ else:
+ return self.format % self.data
+
+ def __str__(self):
+ return self.__repr__()
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ super(VMSDFieldInt, self).read()
+ self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
+ self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
+ self.data = self.sdata
+ return self.data
+
+class VMSDFieldUInt(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldUInt, self).__init__(desc, file)
+
+ def read(self):
+ super(VMSDFieldUInt, self).read()
+ self.data = self.udata
+ return self.data
+
+class VMSDFieldIntLE(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldIntLE, self).__init__(desc, file)
+ self.dtype = '<i%d' % self.size
+
+class VMSDFieldBool(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldBool, self).__init__(desc, file)
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ super(VMSDFieldBool, self).read()
+ if self.data[0] == 0:
+ self.data = False
+ else:
+ self.data = True
+ return self.data
+
+class VMSDFieldStruct(VMSDFieldGeneric):
+ QEMU_VM_SUBSECTION = 0x05
+
+ def __init__(self, desc, file):
+ super(VMSDFieldStruct, self).__init__(desc, file)
+ self.data = collections.OrderedDict()
+
+ # When we see compressed array elements, unfold them here
+ new_fields = []
+ for field in self.desc['struct']['fields']:
+ if not 'array_len' in field:
+ new_fields.append(field)
+ continue
+ array_len = field.pop('array_len')
+ field['index'] = 0
+ new_fields.append(field)
+ for i in xrange(1, array_len):
+ c = field.copy()
+ c['index'] = i
+ new_fields.append(c)
+
+ self.desc['struct']['fields'] = new_fields
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def read(self):
+ for field in self.desc['struct']['fields']:
+ try:
+ reader = vmsd_field_readers[field['type']]
+ except:
+ reader = VMSDFieldGeneric
+
+ field['data'] = reader(field, self.file)
+ field['data'].read()
+
+ if 'index' in field:
+ if field['name'] not in self.data:
+ self.data[field['name']] = []
+ a = self.data[field['name']]
+ if len(a) != int(field['index']):
+ raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
+ a.append(field['data'])
+ else:
+ self.data[field['name']] = field['data']
+
+ if 'subsections' in self.desc['struct']:
+ for subsection in self.desc['struct']['subsections']:
+ if self.file.read8() != self.QEMU_VM_SUBSECTION:
+ raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
+ name = self.file.readstr()
+ version_id = self.file.read32()
+ self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
+ self.data[name].read()
+
+ def getDictItem(self, value):
+ # Strings would fall into the array category, treat
+ # them specially
+ if value.__class__ is ''.__class__:
+ return value
+
+ try:
+ return self.getDictOrderedDict(value)
+ except:
+ try:
+ return self.getDictArray(value)
+ except:
+ try:
+ return value.getDict()
+ except:
+ return value
+
+ def getDictArray(self, array):
+ r = []
+ for value in array:
+ r.append(self.getDictItem(value))
+ return r
+
+ def getDictOrderedDict(self, dict):
+ r = collections.OrderedDict()
+ for (key, value) in dict.items():
+ r[key] = self.getDictItem(value)
+ return r
+
+ def getDict(self):
+ return self.getDictOrderedDict(self.data)
+
+# Maps each VMSD field type name to the class that knows how to decode it.
+vmsd_field_readers = {
+    "bool" : VMSDFieldBool,
+    "int8" : VMSDFieldInt,
+    "int16" : VMSDFieldInt,
+    "int32" : VMSDFieldInt,
+    "int32 equal" : VMSDFieldInt,
+    "int32 le" : VMSDFieldIntLE,
+    "int64" : VMSDFieldInt,
+    "uint8" : VMSDFieldUInt,
+    "uint16" : VMSDFieldUInt,
+    "uint32" : VMSDFieldUInt,
+    "uint32 equal" : VMSDFieldUInt,
+    "uint64" : VMSDFieldUInt,
+    "int64 equal" : VMSDFieldInt,
+    # NOTE(review): "uint8 equal" and "uint16 equal" use the signed
+    # reader while "uint32 equal" uses the unsigned one — looks like an
+    # oversight; confirm against QEMU's vmstate definitions before
+    # changing, since it only affects display of negative-looking values.
+    "uint8 equal" : VMSDFieldInt,
+    "uint16 equal" : VMSDFieldInt,
+    "float64" : VMSDFieldGeneric,
+    "timer" : VMSDFieldGeneric,
+    "buffer" : VMSDFieldGeneric,
+    "unused_buffer" : VMSDFieldGeneric,
+    "bitmap" : VMSDFieldGeneric,
+    "struct" : VMSDFieldStruct,
+    "unknown" : VMSDFieldGeneric,
+}
+
+class VMSDSection(VMSDFieldStruct):
+ def __init__(self, file, version_id, device, section_key):
+ self.file = file
+ self.data = ""
+ self.vmsd_name = ""
+ self.section_key = section_key
+ desc = device
+ if 'vmsd_name' in device:
+ self.vmsd_name = device['vmsd_name']
+
+ # A section really is nothing but a FieldStruct :)
+ super(VMSDSection, self).__init__({ 'struct' : desc }, file)
+
+###############################################################################
+
+class MigrationDump(object):
+ QEMU_VM_FILE_MAGIC = 0x5145564d
+ QEMU_VM_FILE_VERSION = 0x00000003
+ QEMU_VM_EOF = 0x00
+ QEMU_VM_SECTION_START = 0x01
+ QEMU_VM_SECTION_PART = 0x02
+ QEMU_VM_SECTION_END = 0x03
+ QEMU_VM_SECTION_FULL = 0x04
+ QEMU_VM_SUBSECTION = 0x05
+ QEMU_VM_VMDESCRIPTION = 0x06
+
+ def __init__(self, filename):
+ self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
+ ( 'spapr/htab', 0) : ( HTABSection, None ) }
+ self.filename = filename
+ self.vmsd_desc = None
+
+ def read(self, desc_only = False, dump_memory = False, write_memory = False):
+ # Read in the whole file
+ file = MigrationFile(self.filename)
+
+ # File magic
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_MAGIC:
+ raise Exception("Invalid file magic %x" % data)
+
+ # Version (has to be v3)
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_VERSION:
+ raise Exception("Invalid version number %d" % data)
+
+ self.load_vmsd_json(file)
+
+ # Read sections
+ self.sections = collections.OrderedDict()
+
+ if desc_only:
+ return
+
+ ramargs = {}
+ ramargs['page_size'] = self.vmsd_desc['page_size']
+ ramargs['dump_memory'] = dump_memory
+ ramargs['write_memory'] = write_memory
+ self.section_classes[('ram',0)][1] = ramargs
+
+ while True:
+ section_type = file.read8()
+ if section_type == self.QEMU_VM_EOF:
+ break
+ elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
+ section_id = file.read32()
+ name = file.readstr()
+ instance_id = file.read32()
+ version_id = file.read32()
+ section_key = (name, instance_id)
+ classdesc = self.section_classes[section_key]
+ section = classdesc[0](file, version_id, classdesc[1], section_key)
+ self.sections[section_id] = section
+ section.read()
+ elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
+ section_id = file.read32()
+ self.sections[section_id].read()
+ else:
+ raise Exception("Unknown section type: %d" % section_type)
+ file.close()
+
+ def load_vmsd_json(self, file):
+ vmsd_json = file.read_migration_debug_json()
+ self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
+ for device in self.vmsd_desc['devices']:
+ key = (device['name'], device['instance_id'])
+ value = ( VMSDSection, device )
+ self.section_classes[key] = value
+
+ def getDict(self):
+ r = collections.OrderedDict()
+ for (key, value) in self.sections.items():
+ key = "%s (%d)" % ( value.section_key[0], key )
+ r[key] = value.getDict()
+ return r
+
+###############################################################################
+
+class JSONEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, VMSDFieldGeneric):
+ return str(o)
+ return json.JSONEncoder.default(self, o)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
+parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
+parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
+parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
+args = parser.parse_args()
+
+jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
+
+if args.extract:
+ dump = MigrationDump(args.file)
+
+ dump.read(desc_only = True)
+ print "desc.json"
+ f = open("desc.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dump.vmsd_desc))
+ f.close()
+
+ dump.read(write_memory = True)
+ dict = dump.getDict()
+ print "state.json"
+ f = open("state.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dict))
+ f.close()
+elif args.dump == "state":
+ dump = MigrationDump(args.file)
+ dump.read(dump_memory = args.memory)
+ dict = dump.getDict()
+ print jsonenc.encode(dict)
+elif args.dump == "desc":
+ dump = MigrationDump(args.file)
+ dump.read(desc_only = True)
+ print jsonenc.encode(dump.vmsd_desc)
+else:
+ raise Exception("Please specify either -x, -d state or -d dump")
--
1.7.12.4
^ permalink raw reply related [flat|nested] 7+ messages in thread