In-Reply-To: <1468975744-12587-3-git-send-email-kwangwoo.lee@sk.com>
References: <1468975744-12587-1-git-send-email-kwangwoo.lee@sk.com>
 <1468975744-12587-3-git-send-email-kwangwoo.lee@sk.com>
From: Peter Maydell
Date: Mon, 25 Jul 2016 17:12:35 +0100
Subject: Re: [Qemu-devel] [RFC PATCH 2/3] nvdimm: use configurable ACPI IO base and size
To: Kwangwoo Lee
Cc: Xiao Guangrong, "Michael S. Tsirkin", Igor Mammedov, Paolo Bonzini,
 Richard Henderson, Eduardo Habkost, Shannon Zhao, Shannon Zhao,
 QEMU Developers, qemu-arm, Woosuk Chung, Hyunchul Kim

On 20 July 2016 at 01:49, Kwangwoo Lee wrote:
> This patch uses configurable IO base and size to create NPIO AML for
> ACPI NFIT. Since a different architecture like AArch64 does not use
> port-mapped IO, a configurable IO base is required to create a correct
> mapping of the ACPI IO address and size.
>
> Signed-off-by: Kwangwoo Lee
> ---
>  hw/acpi/nvdimm.c        | 23 +++++++++++++++--------
>  hw/i386/acpi-build.c    |  2 +-
>  hw/i386/pc_piix.c       |  8 +++++++-
>  hw/i386/pc_q35.c        |  8 +++++++-
>  include/hw/mem/nvdimm.h | 17 ++++++++++++++++-
>  5 files changed, 46 insertions(+), 12 deletions(-)
>
> diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> index e486128..57e03ee 100644
> --- a/hw/acpi/nvdimm.c
> +++ b/hw/acpi/nvdimm.c
> @@ -765,8 +765,8 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
>                              FWCfgState *fw_cfg, Object *owner)
>  {
>      memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
> -                          "nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
> -    memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);
> +                          "nvdimm-acpi-io", state->dsm_io.size);
> +    memory_region_add_subregion(io, state->dsm_io.base, &state->io_mr);
>
>      state->dsm_mem = g_array_new(false, true /* clear */, 1);
>      acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));

Why does this function take a MemoryRegion to insert itself into, rather
than returning a MemoryRegion for the caller to map into wherever is
appropriate, or even being a DeviceState which has mappable memory
regions via the sysbus API? I guess the answer is "that's the way it
happens to be at the moment", so I'm not really asking for a change here
necessarily.
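Something along these lines is what I mean -- just a sketch, and
nvdimm_init_acpi_state_mr() is a made-up name, not existing code:

/* Hypothetical variant: build the I/O region but leave the mapping
 * decision to the caller. */
MemoryRegion *nvdimm_init_acpi_state_mr(AcpiNVDIMMState *state,
                                        FWCfgState *fw_cfg, Object *owner)
{
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", state->dsm_io.size);
    /* ... dsm_mem / fw_cfg setup exactly as today ... */
    return &state->io_mr;
}

and then the board code does:

    memory_region_add_subregion(system_io, state->dsm_io.base,
                                nvdimm_init_acpi_state_mr(state,
                                                          pcms->fw_cfg,
                                                          OBJECT(pcms)));

That way the device code never needs to know whether it ends up in the
port-IO space or in a memory-mapped region.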
> @@ -912,9 +912,10 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
>
>  static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
>                                GArray *table_data, BIOSLinker *linker,
> -                              GArray *dsm_dma_arrea)
> +                              AcpiNVDIMMState *acpi_nvdimm_state)
>  {
>      Aml *ssdt, *sb_scope, *dev, *field;
> +    AmlRegionSpace rs;
>      int mem_addr_offset, nvdimm_ssdt;
>
>      acpi_add_table(table_offsets, table_data);
> @@ -940,8 +941,14 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
>      aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
>
>      /* map DSM memory and IO into ACPI namespace. */
> -    aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
> -               aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
> +    if (acpi_nvdimm_state->dsm_io.type == NVDIMM_ACPI_IO_PORT) {
> +        rs = AML_SYSTEM_IO;
> +    } else {
> +        rs = AML_SYSTEM_MEMORY;
> +    }
> +    aml_append(dev, aml_operation_region("NPIO", rs,
> +               aml_int(acpi_nvdimm_state->dsm_io.base),
> +               acpi_nvdimm_state->dsm_io.size));
>      aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
>                 aml_name(NVDIMM_ACPI_MEM_ADDR), sizeof(NvdimmDsmIn)));
>
> @@ -1014,7 +1021,7 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
>                                 NVDIMM_ACPI_MEM_ADDR);
>
>      bios_linker_loader_alloc(linker,
> -                             NVDIMM_DSM_MEM_FILE, dsm_dma_arrea,
> +                             NVDIMM_DSM_MEM_FILE, acpi_nvdimm_state->dsm_mem,
>                               sizeof(NvdimmDsmIn), false /* high memory */);
>      bios_linker_loader_add_pointer(linker,
>          ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
> @@ -1026,7 +1033,7 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
>  }
>
>  void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
> -                       BIOSLinker *linker, GArray *dsm_dma_arrea)
> +                       BIOSLinker *linker, AcpiNVDIMMState *acpi_nvdimm_state)
>  {
>      GSList *device_list;
>
> @@ -1037,6 +1044,6 @@ void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
>      }
>      nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
>      nvdimm_build_ssdt(device_list, table_offsets, table_data, linker,
> -                      dsm_dma_arrea);
> +                      acpi_nvdimm_state);
>      g_slist_free(device_list);
>  }
> diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
> index fbba461..54b09a9 100644
> --- a/hw/i386/acpi-build.c
> +++ b/hw/i386/acpi-build.c
> @@ -2632,7 +2632,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
>      }
>      if (pcms->acpi_nvdimm_state.is_enabled) {
>          nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
> -                          pcms->acpi_nvdimm_state.dsm_mem);
> +                          &pcms->acpi_nvdimm_state);
>      }
>
>      /* Add tables supplied by user (if any) */
> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
> index a07dc81..b624f59 100644
> --- a/hw/i386/pc_piix.c
> +++ b/hw/i386/pc_piix.c
> @@ -298,7 +298,13 @@ static void pc_init1(MachineState *machine,
>      }
>
>      if (pcms->acpi_nvdimm_state.is_enabled) {
> -        nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
> +        AcpiNVDIMMState *acpi_nvdimm_state = &pcms->acpi_nvdimm_state;
> +
> +        acpi_nvdimm_state->dsm_io.type = NVDIMM_ACPI_IO_PORT;
> +        acpi_nvdimm_state->dsm_io.base = NVDIMM_ACPI_IO_BASE;
> +        acpi_nvdimm_state->dsm_io.size = NVDIMM_ACPI_IO_LEN;
> +
> +        nvdimm_init_acpi_state(acpi_nvdimm_state, system_io,
>                                 pcms->fw_cfg, OBJECT(pcms));
>      }
>  }
> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
> index c0b9961..779ac32 100644
> --- a/hw/i386/pc_q35.c
> +++ b/hw/i386/pc_q35.c
> @@ -263,7 +263,13 @@ static void pc_q35_init(MachineState *machine)
>      }
>
>      if (pcms->acpi_nvdimm_state.is_enabled) {
> -        nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
> +        AcpiNVDIMMState *acpi_nvdimm_state = &pcms->acpi_nvdimm_state;
> +
> +        acpi_nvdimm_state->dsm_io.type = NVDIMM_ACPI_IO_PORT;
> +        acpi_nvdimm_state->dsm_io.base = NVDIMM_ACPI_IO_BASE;
> +        acpi_nvdimm_state->dsm_io.size = NVDIMM_ACPI_IO_LEN;
> +
> +        nvdimm_init_acpi_state(acpi_nvdimm_state, system_io,
>                                 pcms->fw_cfg, OBJECT(pcms));
>      }
>  }

Ideally this would be a QOM object with QOM properties, rather than an
ad-hoc init function.
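As a rough sketch only -- the type name, property names and field
layout below are all invented for illustration, not existing QEMU code:

/* Hypothetical QOM wrapper for the NVDIMM ACPI state. */
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/mem/nvdimm.h"

#define TYPE_NVDIMM_ACPI "nvdimm-acpi"
#define NVDIMM_ACPI(obj) \
    OBJECT_CHECK(NVDIMMAcpiDev, (obj), TYPE_NVDIMM_ACPI)

typedef struct NVDIMMAcpiDev {
    SysBusDevice parent_obj;
    AcpiNVDIMMState state;
    uint64_t dsm_io_base;    /* settable by board code or -global */
    uint32_t dsm_io_size;
} NVDIMMAcpiDev;

static Property nvdimm_acpi_properties[] = {
    DEFINE_PROP_UINT64("dsm-io-base", NVDIMMAcpiDev, dsm_io_base,
                       NVDIMM_ACPI_IO_BASE),
    DEFINE_PROP_UINT32("dsm-io-size", NVDIMMAcpiDev, dsm_io_size,
                       NVDIMM_ACPI_IO_LEN),
    DEFINE_PROP_END_OF_LIST(),
};

static void nvdimm_acpi_realize(DeviceState *dev, Error **errp)
{
    NVDIMMAcpiDev *d = NVDIMM_ACPI(dev);

    memory_region_init_io(&d->state.io_mr, OBJECT(dev), &nvdimm_dsm_ops,
                          &d->state, "nvdimm-acpi-io", d->dsm_io_size);
    /* Expose the region; the board maps it at dsm-io-base. */
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &d->state.io_mr);
}

static void nvdimm_acpi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = nvdimm_acpi_realize;
    dc->props = nvdimm_acpi_properties;
}

/* ...plus the usual TypeInfo/type_init() registration boilerplate. */

The base and size then become ordinary QOM properties the board sets,
instead of fields poked into the struct by hand before calling an init
function.

thanks
-- PMM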