From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
To: pbonzini@redhat.com, imammedo@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, stefanha@redhat.com,
    mst@redhat.com, rth@twiddle.net, ehabkost@redhat.com,
    dan.j.williams@intel.com, kvm@vger.kernel.org, qemu-devel@nongnu.org,
    Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH v2 2/8] nvdimm acpi: prebuild nvdimm devices for available slots
Date: Fri, 12 Aug 2016 14:54:04 +0800
Message-ID: <1470984850-66891-3-git-send-email-guangrong.xiao@linux.intel.com>
In-Reply-To: <1470984850-66891-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1470984850-66891-1-git-send-email-guangrong.xiao@linux.intel.com>

For each NVDIMM present or intended to be supported by the platform, the
platform firmware also exposes an ACPI Namespace Device under the root
device.

So build nvdimm devices for all slots in order to support vNVDIMM hotplug.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 hw/acpi/nvdimm.c        | 41 ++++++++++++++++++++++++-----------------
 hw/i386/acpi-build.c    |  2 +-
 include/hw/mem/nvdimm.h |  3 ++-
 3 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index 5454c0f..0e2b9f0 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -886,12 +886,11 @@ static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
     aml_append(dev, method);
 }
 
-static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
+static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
 {
-    for (; device_list; device_list = device_list->next) {
-        DeviceState *dev = device_list->data;
-        int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
-                                           NULL);
+    uint32_t slot;
+
+    for (slot = 0; slot < ram_slots; slot++) {
         uint32_t handle = nvdimm_slot_to_handle(slot);
         Aml *nvdimm_dev;
 
@@ -912,9 +911,9 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
     }
 }
 
-static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
-                              GArray *table_data, BIOSLinker *linker,
-                              GArray *dsm_dma_arrea)
+static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
+                              BIOSLinker *linker, GArray *dsm_dma_arrea,
+                              uint32_t ram_slots)
 {
     Aml *ssdt, *sb_scope, *dev, *field;
     int mem_addr_offset, nvdimm_ssdt;
@@ -1003,7 +1002,7 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
     /* 0 is reserved for root device. */
     nvdimm_build_device_dsm(dev, 0);
 
-    nvdimm_build_nvdimm_devices(device_list, dev);
+    nvdimm_build_nvdimm_devices(dev, ram_slots);
 
     aml_append(sb_scope, dev);
     aml_append(ssdt, sb_scope);
@@ -1028,17 +1027,25 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
 }
 
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, GArray *dsm_dma_arrea)
+                       BIOSLinker *linker, GArray *dsm_dma_arrea,
+                       uint32_t ram_slots)
 {
     GSList *device_list;
 
-    /* no NVDIMM device is plugged. */
     device_list = nvdimm_get_plugged_device_list();
-    if (!device_list) {
-        return;
+
+    /* NVDIMM device is plugged. */
+    if (device_list) {
+        nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
+        g_slist_free(device_list);
+    }
+
+    /*
+     * NVDIMM device is allowed to be plugged only if there has available
+     * slot.
+     */
+    if (ram_slots) {
+        nvdimm_build_ssdt(table_offsets, table_data, linker, dsm_dma_arrea,
+                          ram_slots);
     }
-    nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
-    nvdimm_build_ssdt(device_list, table_offsets, table_data, linker,
-                      dsm_dma_arrea);
-    g_slist_free(device_list);
 }
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index a26a4bb..b1d0ced 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2712,7 +2712,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
     }
     if (pcms->acpi_nvdimm_state.is_enabled) {
         nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
-                          pcms->acpi_nvdimm_state.dsm_mem);
+                          pcms->acpi_nvdimm_state.dsm_mem, machine->ram_slots);
     }
 
     /* Add tables supplied by user (if any) */
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index 1cfe9e0..63a2b20 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -112,5 +112,6 @@ typedef struct AcpiNVDIMMState AcpiNVDIMMState;
 void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
                             FWCfgState *fw_cfg, Object *owner);
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, GArray *dsm_dma_arrea);
+                       BIOSLinker *linker, GArray *dsm_dma_arrea,
+                       uint32_t ram_slots);
 #endif
-- 
1.8.3.1
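
As an illustration only (not part of the patch): a minimal standalone sketch
of the per-slot convention the new loop relies on, assuming, as the
"0 is reserved for root device" comment and nvdimm_slot_to_handle() imply,
that slot N maps to handle N + 1 and that every possible slot gets its own
namespace device whether or not an NVDIMM is currently plugged into it. The
helper name, the device-name format and the program itself are illustrative,
not taken from QEMU.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for QEMU's nvdimm_slot_to_handle(): handle 0
     * is assumed to belong to the root device, so slot N becomes N + 1. */
    static uint32_t slot_to_handle(uint32_t slot)
    {
        return slot + 1;
    }

    int main(void)
    {
        uint32_t ram_slots = 4;   /* e.g. "-m 4G,slots=4,maxmem=32G" */
        uint32_t slot;

        /* One ACPI namespace device per possible slot, populated or not,
         * mirroring the for (slot = 0; slot < ram_slots; slot++) loop in
         * nvdimm_build_nvdimm_devices() above. */
        for (slot = 0; slot < ram_slots; slot++) {
            printf("NV%02X -> handle 0x%X\n",
                   (unsigned)slot, (unsigned)slot_to_handle(slot));
        }
        return 0;
    }

The point of prebuilding, per the commit message, is hotplug support: since a
device object already exists for every slot the machine allows, plugging a
vNVDIMM later does not require regenerating AML for a new namespace device.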