From: Salil Mehta via <qemu-devel@nongnu.org>
To: <qemu-devel@nongnu.org>, <qemu-arm@nongnu.org>
Cc: <salil.mehta@huawei.com>, <maz@kernel.org>,
	<jean-philippe@linaro.org>, <jonathan.cameron@huawei.com>,
	<lpieralisi@kernel.org>, <peter.maydell@linaro.org>,
	<richard.henderson@linaro.org>, <imammedo@redhat.com>,
	<andrew.jones@linux.dev>, <david@redhat.com>, <philmd@linaro.org>,
	<eric.auger@redhat.com>, <will@kernel.org>, <ardb@kernel.org>,
	<oliver.upton@linux.dev>, <pbonzini@redhat.com>, <mst@redhat.com>,
	<gshan@redhat.com>, <rafael@kernel.org>,
	<borntraeger@linux.ibm.com>, <alex.bennee@linaro.org>,
	<linux@armlinux.org.uk>, <darren@os.amperecomputing.com>,
	<ilkka@os.amperecomputing.com>, <vishnu@os.amperecomputing.com>,
	<karl.heubaum@oracle.com>, <miguel.luis@oracle.com>,
	<salil.mehta@opnsrc.net>, <zhukeqian1@huawei.com>,
	<wangxiongfeng2@huawei.com>, <wangyanan55@huawei.com>,
	<jiakernel2@gmail.com>, <maobibo@loongson.cn>,
	<lixianglai@loongson.cn>
Subject: [PATCH RFC V2 04/37] arm/virt, target/arm: Machine init time change common to vCPU {cold|hot}-plug
Date: Tue, 26 Sep 2023 11:04:03 +0100	[thread overview]
Message-ID: <20230926100436.28284-5-salil.mehta@huawei.com> (raw)
In-Reply-To: <20230926100436.28284-1-salil.mehta@huawei.com>

Refactor and introduce the common logic required during the initialization of
both cold-plugged and hot-plugged vCPUs. Also initialize the *disabled* state
of the vCPUs; this state is used later, during the init phases of various
other components such as the GIC, PMU and ACPI, as part of the virt machine
initialization.

KVM vCPUs corresponding to unplugged or yet-to-be-plugged QOM CPUs are kept in
a powered-off state on the KVM host and do not run any guest code. Plugged
vCPUs are also kept in a powered-off state, but their vCPU threads exist and
are kept sleeping.
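
On the KVM side, the powered-off state above is typically requested through the
KVM_ARM_VCPU_POWER_OFF feature bit of the KVM_ARM_VCPU_INIT ioctl. Below is a
minimal, illustrative userspace sketch only (not code from this series); it
assumes an arm64 host and fds already obtained via KVM_CREATE_VM and
KVM_CREATE_VCPU, with error handling trimmed:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* vm_fd from KVM_CREATE_VM, vcpu_fd from KVM_CREATE_VCPU */
    static int init_vcpu_powered_off(int vm_fd, int vcpu_fd)
    {
        struct kvm_vcpu_init init = { 0 };

        /* let KVM fill in the preferred target for this host */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0) {
            return -1;
        }

        /* request that the vCPU start in the (PSCI) powered-off state */
        init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }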

TBD:
For the cold-booted vCPUs this change also exists in arm_load_kernel() in
boot.c, but for the hot-plugged CPUs the change should still remain part of
the pre-plug phase. This means we are duplicating the powering-off of the
cold-booted CPUs. Shall we remove the duplicate change from boot.c? A rough
sketch of the existing boot.c behaviour is included below for context.
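
For context, the duplicated cold-boot behaviour referred to above is the loop
in arm_load_kernel() (hw/arm/boot.c) that, when a PSCI conduit is in use,
marks every CPU except the primary to start powered off. Roughly paraphrased
(QEMU-internal APIs assumed; not exact upstream code, shown only as a
fragment):

    CPUState *cs;

    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
        if (cs != first_cpu) {
            /* secondary CPUs start in the PSCI powered-down state */
            object_property_set_bool(OBJECT(cs), "start-powered-off", true,
                                     &error_abort);
        }
    }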

Co-developed-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Reported-by: Gavin Shan <gavin.shan@redhat.com>
[GS: pointed out an assertion failure caused by a wrong range check]
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
---
 hw/arm/virt.c      | 149 ++++++++++++++++++++++++++++++++++++++++-----
 target/arm/cpu.c   |   7 +++
 target/arm/cpu64.c |  14 +++++
 3 files changed, 156 insertions(+), 14 deletions(-)
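
A hypothetical example of how the new pre-plug topology checks below would be
exercised once the whole series is applied (CPU type and property values are
illustrative only; per the machvirt_init() change this needs KVM with GICv3
and, e.g., -smp cpus=4,maxcpus=6 on the command line):

    (qemu) device_add host-arm-cpu,id=cpu4,socket-id=0,cluster-id=0,core-id=4,thread-id=0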

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 0eb6bf5a18..3668ad27ec 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -221,6 +221,7 @@ static const char *valid_cpus[] = {
     ARM_CPU_TYPE_NAME("max"),
 };
 
+static CPUArchId *virt_find_cpu_slot(MachineState *ms, int vcpuid);
 static int virt_get_socket_id(const MachineState *ms, int cpu_index);
 static int virt_get_cluster_id(const MachineState *ms, int cpu_index);
 static int virt_get_core_id(const MachineState *ms, int cpu_index);
@@ -2154,6 +2155,14 @@ static void machvirt_init(MachineState *machine)
         exit(1);
     }
 
+    finalize_gic_version(vms);
+    if (tcg_enabled() || hvf_enabled() || qtest_enabled() ||
+        (vms->gic_version < VIRT_GIC_VERSION_3)) {
+        machine->smp.max_cpus = smp_cpus;
+        mc->has_hotpluggable_cpus = false;
+        warn_report("cpu hotplug feature has been disabled");
+    }
+
     possible_cpus = mc->possible_cpu_arch_ids(machine);
 
     /*
@@ -2180,11 +2189,6 @@ static void machvirt_init(MachineState *machine)
         virt_set_memmap(vms, pa_bits);
     }
 
-    /* We can probe only here because during property set
-     * KVM is not available yet
-     */
-    finalize_gic_version(vms);
-
     sysmem = vms->sysmem = get_system_memory();
 
     if (vms->secure) {
@@ -2289,17 +2293,9 @@ static void machvirt_init(MachineState *machine)
     assert(possible_cpus->len == max_cpus);
     for (n = 0; n < possible_cpus->len; n++) {
         Object *cpuobj;
-        CPUState *cs;
-
-        if (n >= smp_cpus) {
-            break;
-        }
 
         cpuobj = object_new(possible_cpus->cpus[n].type);
 
-        cs = CPU(cpuobj);
-        cs->cpu_index = n;
-
         aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);
         object_property_set_int(cpuobj, "socket-id",
                                 virt_get_socket_id(machine, n), NULL);
@@ -2804,6 +2800,50 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
     return ms->possible_cpus;
 }
 
+static CPUArchId *virt_find_cpu_slot(MachineState *ms, int vcpuid)
+{
+    VirtMachineState *vms = VIRT_MACHINE(ms);
+    CPUArchId *found_cpu;
+    uint64_t mp_affinity;
+
+    assert(vcpuid >= 0 && vcpuid < ms->possible_cpus->len);
+
+    /*
+     * RFC: Question:
+     * TBD: Should mp-affinity be treated as MPIDR?
+     */
+    mp_affinity = virt_cpu_mp_affinity(vms, vcpuid);
+    found_cpu = &ms->possible_cpus->cpus[vcpuid];
+
+    assert(found_cpu->arch_id == mp_affinity);
+
+    /*
+     * RFC: Question:
+     * Slot-id is the index at which a vCPU with a given arch-id
+     * (= mpidr/mp-affinity) is plugged. For host KVM, a vCPU's MPIDR is
+     * derived from its vcpu-id. As I understand it, MPIDR and vcpu-id are
+     * properties of the vCPU, whereas slot-id is more a property of the
+     * machine; the current code assumes slot-id == vcpu-id, which is vague.
+     *
+     * Q1: Is there any requirement to clearly represent slot and dissociate it
+     *     from vcpu-id?
+     * Q2: Should we make MPIDR within host KVM user configurable?
+     *
+     *          +----+----+----+----+----+----+----+----+
+     * MPIDR    |||  Res  |   Aff2  |   Aff1  |  Aff0   |
+     *          +----+----+----+----+----+----+----+----+
+     *                     \         \         \   |    |
+     *                      \   8bit  \   8bit  \  |4bit|
+     *                       \<------->\<------->\ |<-->|
+     *                        \         \         \|    |
+     *          +----+----+----+----+----+----+----+----+
+ * VCPU-ID  |  Byte3  |  Byte2  |  Byte1  |  Byte0  |
+     *          +----+----+----+----+----+----+----+----+
+     */
+
+    return found_cpu;
+}
+
 static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
 {
@@ -2847,6 +2887,81 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev,
                          dev, &error_abort);
 }
 
+static void virt_cpu_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                              Error **errp)
+{
+    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
+    MachineState *ms = MACHINE(hotplug_dev);
+    ARMCPU *cpu = ARM_CPU(dev);
+    CPUState *cs = CPU(dev);
+    CPUArchId *cpu_slot;
+    int32_t min_cpuid = 0;
+    int32_t max_cpuid;
+
+    /* sanity check the cpu */
+    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
+        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
+                   ms->cpu_type);
+        return;
+    }
+
+    if ((cpu->thread_id < 0) || (cpu->thread_id >= ms->smp.threads)) {
+        error_setg(errp, "Invalid thread-id %u specified, correct range 0:%u",
+                   cpu->thread_id, ms->smp.threads - 1);
+        return;
+    }
+
+    max_cpuid = ms->possible_cpus->len - 1;
+    if (!dev->hotplugged) {
+        min_cpuid = vms->acpi_dev ? ms->smp.cpus : 0;
+        max_cpuid = vms->acpi_dev ? max_cpuid : ms->smp.cpus - 1;
+    }
+
+    if ((cpu->core_id < min_cpuid) || (cpu->core_id > max_cpuid)) {
+        error_setg(errp, "Invalid core-id %d specified, correct range %d:%d",
+                   cpu->core_id, min_cpuid, max_cpuid);
+        return;
+    }
+
+    if ((cpu->cluster_id < 0) || (cpu->cluster_id >= ms->smp.clusters)) {
+        error_setg(errp, "Invalid cluster-id %u specified, correct range 0:%u",
+                   cpu->cluster_id, ms->smp.clusters - 1);
+        return;
+    }
+
+    if ((cpu->socket_id < 0) || (cpu->socket_id >= ms->smp.sockets)) {
+        error_setg(errp, "Invalid socket-id %u specified, correct range 0:%u",
+                   cpu->socket_id, ms->smp.sockets - 1);
+        return;
+    }
+
+    cs->cpu_index = virt_get_cpu_id_from_cpu_topo(ms, dev);
+
+    cpu_slot = virt_find_cpu_slot(ms, cs->cpu_index);
+    if (qemu_present_cpu(CPU(cpu_slot->cpu))) {
+        error_setg(errp, "cpu(id%d=%d:%d:%d:%d) with arch-id %" PRIu64 " exist",
+                   cs->cpu_index, cpu->socket_id, cpu->cluster_id, cpu->core_id,
+                   cpu->thread_id, cpu_slot->arch_id);
+        return;
+    }
+    virt_cpu_set_properties(OBJECT(cs), cpu_slot, errp);
+}
+
+static void virt_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                          Error **errp)
+{
+    MachineState *ms = MACHINE(hotplug_dev);
+    CPUState *cs = CPU(dev);
+    CPUArchId *cpu_slot;
+
+    /* insert the cold/hot-plugged vcpu in the slot */
+    cpu_slot = virt_find_cpu_slot(ms, cs->cpu_index);
+    cpu_slot->cpu = OBJECT(dev);
+
+    cs->disabled = false;
+    return;
+}
+
 static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                                             DeviceState *dev, Error **errp)
 {
@@ -2888,6 +3003,8 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
         object_property_set_str(OBJECT(dev), "reserved-regions[0]",
                                 resv_prop_str, errp);
         g_free(resv_prop_str);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        virt_cpu_pre_plug(hotplug_dev, dev, errp);
     }
 }
 
@@ -2909,6 +3026,8 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
         virt_memory_plug(hotplug_dev, dev, errp);
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
         virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        virt_cpu_plug(hotplug_dev, dev, errp);
     }
 
     if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
@@ -2993,7 +3112,8 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
     if (device_is_dynamic_sysbus(mc, dev) ||
         object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
         object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI) ||
-        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
+        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
         return HOTPLUG_HANDLER(machine);
     }
     return NULL;
@@ -3070,6 +3190,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
 #endif
     mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
     mc->kvm_type = virt_kvm_type;
+    mc->has_hotpluggable_cpus = true;
     assert(!mc->get_hotplug_handler);
     mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
     hc->pre_plug = virt_machine_device_pre_plug_cb;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 1376350416..3a2e7e64ee 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2332,6 +2332,12 @@ static const struct TCGCPUOps arm_tcg_ops = {
 };
 #endif /* CONFIG_TCG */
 
+static int64_t arm_cpu_get_arch_id(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    return cpu->mp_affinity;
+}
+
 static void arm_cpu_class_init(ObjectClass *oc, void *data)
 {
     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
@@ -2350,6 +2356,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->class_by_name = arm_cpu_class_by_name;
     cc->has_work = arm_cpu_has_work;
     cc->dump_state = arm_cpu_dump_state;
+    cc->get_arch_id = arm_cpu_get_arch_id;
     cc->set_pc = arm_cpu_set_pc;
     cc->get_pc = arm_cpu_get_pc;
     cc->gdb_read_register = arm_cpu_gdb_read_register;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 96158093cc..a660e3f483 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -739,6 +739,17 @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
     }
 }
 
+static void aarch64_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+
+    /*
+     * We start every AArch64 vCPU as a disabled possible vCPU. It needs to
+     * be enabled explicitly.
+     */
+    cs->disabled = true;
+}
+
 static void aarch64_cpu_finalizefn(Object *obj)
 {
 }
@@ -751,7 +762,9 @@ static gchar *aarch64_gdb_arch_name(CPUState *cs)
 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
 {
     CPUClass *cc = CPU_CLASS(oc);
+    DeviceClass *dc = DEVICE_CLASS(oc);
 
+    dc->user_creatable = true;
     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
     cc->gdb_num_core_regs = 34;
@@ -800,6 +813,7 @@ static const TypeInfo aarch64_cpu_type_info = {
     .name = TYPE_AARCH64_CPU,
     .parent = TYPE_ARM_CPU,
     .instance_size = sizeof(ARMCPU),
+    .instance_init = aarch64_cpu_initfn,
     .instance_finalize = aarch64_cpu_finalizefn,
     .abstract = true,
     .class_size = sizeof(AArch64CPUClass),
-- 
2.34.1


