From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
To: pbonzini@redhat.com, imammedo@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, stefanha@redhat.com,
	mst@redhat.com, rth@twiddle.net, ehabkost@redhat.com,
	dan.j.williams@intel.com, kvm@vger.kernel.org,
	qemu-devel@nongnu.org,
	Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH v4 1/3] nvdimm acpi: introduce fit buffer
Date: Thu,  3 Nov 2016 11:51:28 +0800	[thread overview]
Message-ID: <1478145090-11987-2-git-send-email-guangrong.xiao@linux.intel.com> (raw)
In-Reply-To: <1478145090-11987-1-git-send-email-guangrong.xiao@linux.intel.com>

The buffer is used to save the FIT info for all present nvdimm devices
and is updated whenever an nvdimm device is plugged or unplugged. A
later patch will use it to construct the NVDIMM ACPI _FIT method, which
reflects the nvdimm devices present after hotplug.

As the FIT buffer cannot be completely mapped into the guest address
space, OSPM has to exit to QEMU multiple times to read it. This opens a
race condition - the FIT may change between these exits - so we mark
@dirty whenever the buffer is updated.

@dirty is cleared when OSPM fetches the first chunk of the FIT buffer;
if dirty is found to be set again during a later access, OSPM restarts
the whole read.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 hw/acpi/nvdimm.c        | 57 ++++++++++++++++++++++++++++++-------------------
 hw/i386/acpi-build.c    |  2 +-
 hw/i386/pc.c            |  4 ++++
 include/hw/mem/nvdimm.h | 21 +++++++++++++++++-
 4 files changed, 60 insertions(+), 24 deletions(-)
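
Below is a minimal, illustrative sketch (not part of this patch - the real
read interface only arrives with the later _FIT patch, and all names and the
chunked-read helper here are made up for illustration) of how the @dirty flag
can be used on the QEMU side: the flag is cleared when OSPM starts a read
sequence at offset 0, and any later chunk read that finds it set again fails
so that OSPM restarts the whole sequence.

#include <glib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* mirrors NvdimmFitBuffer introduced by this patch */
typedef struct {
    GArray *fit;   /* concatenated NFIT structures of all plugged NVDIMMs */
    bool dirty;    /* set whenever the buffer is rebuilt on hotplug */
} FitBufSketch;

/*
 * Copy one chunk of the FIT starting at @offset into @out.
 * Returns the number of bytes copied, 0 at the end of the FIT,
 * or -1 when the buffer changed under OSPM and the read must restart.
 */
static int fit_read_chunk(FitBufSketch *buf, uint32_t offset,
                          uint8_t *out, uint32_t out_len)
{
    uint32_t len;

    if (offset == 0) {
        /* first access of a new read sequence: start from a clean state */
        buf->dirty = false;
    } else if (buf->dirty) {
        /* FIT was rebuilt (hotplug) since offset 0 was read */
        return -1;
    }

    if (offset >= buf->fit->len) {
        return 0;
    }

    len = MIN(out_len, buf->fit->len - offset);
    memcpy(out, buf->fit->data + offset, len);
    return (int)len;
}

On the guest side, OSPM would simply restart from offset 0 whenever it sees
the failure, so it never stitches together chunks taken from two different
FIT generations.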

diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index b8a2e62..9fee077 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -38,11 +38,7 @@ static int nvdimm_plugged_device_list(Object *obj, void *opaque)
     GSList **list = opaque;
 
     if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
-        DeviceState *dev = DEVICE(obj);
-
-        if (dev->realized) { /* only realized NVDIMMs matter */
-            *list = g_slist_append(*list, DEVICE(obj));
-        }
+        *list = g_slist_append(*list, DEVICE(obj));
     }
 
     object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
@@ -348,8 +344,9 @@ static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
                                          (DSM) in DSM Spec Rev1.*/);
 }
 
-static GArray *nvdimm_build_device_structure(GSList *device_list)
+static GArray *nvdimm_build_device_structure(void)
 {
+    GSList *device_list = nvdimm_get_plugged_device_list();
     GArray *structures = g_array_new(false, true /* clear */, 1);
 
     for (; device_list; device_list = device_list->next) {
@@ -367,28 +364,50 @@ static GArray *nvdimm_build_device_structure(GSList *device_list)
         /* build NVDIMM Control Region Structure. */
         nvdimm_build_structure_dcr(structures, dev);
     }
+    g_slist_free(device_list);
 
     return structures;
 }
 
-static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
+static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+    fit_buf->fit = g_array_new(false, true /* clear */, 1);
+}
+
+static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+    g_array_free(fit_buf->fit, true);
+    fit_buf->fit = nvdimm_build_device_structure();
+    fit_buf->dirty = true;
+}
+
+void nvdimm_acpi_hotplug(AcpiNVDIMMState *state)
+{
+    nvdimm_build_fit_buffer(&state->fit_buf);
+}
+
+static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
                               GArray *table_data, BIOSLinker *linker)
 {
-    GArray *structures = nvdimm_build_device_structure(device_list);
+    NvdimmFitBuffer *fit_buf = &state->fit_buf;
     unsigned int header;
 
+    /* NVDIMM device is not plugged? */
+    if (!fit_buf->fit->len) {
+        return;
+    }
+
     acpi_add_table(table_offsets, table_data);
 
     /* NFIT header. */
     header = table_data->len;
     acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
     /* NVDIMM device structures. */
-    g_array_append_vals(table_data, structures->data, structures->len);
+    g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
 
     build_header(linker, table_data,
                  (void *)(table_data->data + header), "NFIT",
-                 sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
-    g_array_free(structures, true);
+                 sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
 }
 
 struct NvdimmDsmIn {
@@ -771,6 +790,8 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
     acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
     fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                     state->dsm_mem->len);
+
+    nvdimm_init_fit_buffer(&state->fit_buf);
 }
 
 #define NVDIMM_COMMON_DSM       "NCAL"
@@ -1045,25 +1066,17 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
 }
 
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, GArray *dsm_dma_arrea,
+                       BIOSLinker *linker, AcpiNVDIMMState *state,
                        uint32_t ram_slots)
 {
-    GSList *device_list;
-
-    device_list = nvdimm_get_plugged_device_list();
-
-    /* NVDIMM device is plugged. */
-    if (device_list) {
-        nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
-        g_slist_free(device_list);
-    }
+    nvdimm_build_nfit(state, table_offsets, table_data, linker);
 
     /*
      * NVDIMM device is allowed to be plugged only if there is available
      * slot.
      */
     if (ram_slots) {
-        nvdimm_build_ssdt(table_offsets, table_data, linker, dsm_dma_arrea,
+        nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
                           ram_slots);
     }
 }
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 6ae4769..bc49958 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2767,7 +2767,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
     }
     if (pcms->acpi_nvdimm_state.is_enabled) {
         nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
-                          pcms->acpi_nvdimm_state.dsm_mem, machine->ram_slots);
+                          &pcms->acpi_nvdimm_state, machine->ram_slots);
     }
 
     /* Add tables supplied by user (if any) */
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 93ff49c..77ca7f4 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1700,6 +1700,10 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
         goto out;
     }
 
+    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+        nvdimm_acpi_hotplug(&pcms->acpi_nvdimm_state);
+    }
+
     hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
     hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort);
 out:
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index 63a2b20..232437c 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -98,12 +98,30 @@ typedef struct NVDIMMClass NVDIMMClass;
 #define NVDIMM_ACPI_IO_BASE     0x0a18
 #define NVDIMM_ACPI_IO_LEN      4
 
+/*
+ * The buffer, @fit, saves the FIT info for all the present NVDIMM
+ * devices and is updated whenever an NVDIMM device is plugged or
+ * unplugged.
+ *
+ * Mark @dirty whenever the buffer is updated so that the NVDIMM
+ * ACPI _FIT method is prevented from reading incomplete or stale FIT
+ * info when a FIT update happens during multiple RFIT calls.
+ */
+struct NvdimmFitBuffer {
+    GArray *fit;
+    bool dirty;
+};
+typedef struct NvdimmFitBuffer NvdimmFitBuffer;
+
 struct AcpiNVDIMMState {
     /* detect if NVDIMM support is enabled. */
     bool is_enabled;
 
     /* the data of the fw_cfg file NVDIMM_DSM_MEM_FILE. */
     GArray *dsm_mem;
+
+    NvdimmFitBuffer fit_buf;
+
     /* the IO region used by OSPM to transfer control to QEMU. */
     MemoryRegion io_mr;
 };
@@ -112,6 +130,7 @@ typedef struct AcpiNVDIMMState AcpiNVDIMMState;
 void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
                             FWCfgState *fw_cfg, Object *owner);
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, GArray *dsm_dma_arrea,
+                       BIOSLinker *linker, AcpiNVDIMMState *state,
                        uint32_t ram_slots);
+void nvdimm_acpi_hotplug(AcpiNVDIMMState *state);
 #endif
-- 
1.8.3.1



Thread overview: 60+ messages
2016-11-03  3:51 [PATCH v4 0/3] nvdimm: hotplug support Xiao Guangrong
2016-11-03  3:51 ` [Qemu-devel] " Xiao Guangrong
2016-11-03  3:51 ` Xiao Guangrong [this message]
2016-11-03  3:51   ` [Qemu-devel] [PATCH v4 1/3] nvdimm acpi: introduce fit buffer Xiao Guangrong
2016-11-03 10:00   ` Stefan Hajnoczi
2016-11-03 10:00     ` [Qemu-devel] " Stefan Hajnoczi
2016-11-03  9:58     ` Xiao Guangrong
2016-11-03  9:58       ` [Qemu-devel] " Xiao Guangrong
2016-11-03 11:02   ` Igor Mammedov
2016-11-03 11:02     ` [Qemu-devel] " Igor Mammedov
2016-11-03 11:09     ` Xiao Guangrong
2016-11-03 11:09       ` [Qemu-devel] " Xiao Guangrong
2016-11-03 12:29       ` Igor Mammedov
2016-11-03 12:29         ` [Qemu-devel] " Igor Mammedov
2016-11-03  3:51 ` [PATCH v4 2/3] nvdimm acpi: introduce _FIT Xiao Guangrong
2016-11-03  3:51   ` [Qemu-devel] " Xiao Guangrong
2016-11-03  9:53   ` Stefan Hajnoczi
2016-11-03  9:53     ` [Qemu-devel] " Stefan Hajnoczi
2016-11-03 10:08     ` Xiao Guangrong
2016-11-03 10:08       ` [Qemu-devel] " Xiao Guangrong
2016-11-03 12:30       ` Igor Mammedov
2016-11-03 12:30         ` [Qemu-devel] " Igor Mammedov
2016-11-03 11:58   ` Igor Mammedov
2016-11-03 11:58     ` [Qemu-devel] " Igor Mammedov
2016-11-03 12:21     ` Xiao Guangrong
2016-11-03 12:21       ` [Qemu-devel] " Xiao Guangrong
2016-11-03 13:00       ` Igor Mammedov
2016-11-03 13:00         ` [Qemu-devel] " Igor Mammedov
2016-11-03 13:02         ` Xiao Guangrong
2016-11-03 13:02           ` [Qemu-devel] " Xiao Guangrong
2016-11-03 14:49           ` Igor Mammedov
2016-11-03 14:49             ` [Qemu-devel] " Igor Mammedov
2016-11-03 14:53             ` Xiao Guangrong
2016-11-03 14:53               ` [Qemu-devel] " Xiao Guangrong
2016-11-03 16:13               ` Igor Mammedov
2016-11-03 16:13                 ` [Qemu-devel] " Igor Mammedov
2016-11-03 16:17                 ` Xiao Guangrong
2016-11-03 16:17                   ` [Qemu-devel] " Xiao Guangrong
2016-11-03 16:49                   ` Igor Mammedov
2016-11-03 16:49                     ` [Qemu-devel] " Igor Mammedov
2016-11-03 16:53                     ` Xiao Guangrong
2016-11-03 16:53                       ` [Qemu-devel] " Xiao Guangrong
2016-11-03 17:29                       ` Igor Mammedov
2016-11-03 17:29                         ` [Qemu-devel] " Igor Mammedov
2016-11-03 17:39                         ` Xiao Guangrong
2016-11-03 17:39                           ` [Qemu-devel] " Xiao Guangrong
2016-11-03 17:54                           ` Igor Mammedov
2016-11-03 17:54                             ` [Qemu-devel] " Igor Mammedov
2016-11-03  3:51 ` [PATCH v4 3/3] pc: memhp: enable nvdimm device hotplug Xiao Guangrong
2016-11-03  3:51   ` [Qemu-devel] " Xiao Guangrong
2016-11-03 12:51   ` Igor Mammedov
2016-11-03 12:51     ` [Qemu-devel] " Igor Mammedov
2016-11-03 12:54     ` Xiao Guangrong
2016-11-03 12:54       ` [Qemu-devel] " Xiao Guangrong
2016-11-03  4:14 ` [PATCH v4 0/3] nvdimm: hotplug support Michael S. Tsirkin
2016-11-03  4:14   ` [Qemu-devel] " Michael S. Tsirkin
2016-11-03  4:25   ` Xiao Guangrong
2016-11-03  4:25     ` [Qemu-devel] " Xiao Guangrong
2016-11-03  4:51     ` Michael S. Tsirkin
2016-11-03  4:51       ` [Qemu-devel] " Michael S. Tsirkin
