From mboxrd@z Thu Jan  1 00:00:00 1970
From: Bharata B Rao
Date: Mon, 23 Mar 2015 19:05:57 +0530
Message-Id: <1427117764-23008-17-git-send-email-bharata@linux.vnet.ibm.com>
In-Reply-To: <1427117764-23008-1-git-send-email-bharata@linux.vnet.ibm.com>
References: <1427117764-23008-1-git-send-email-bharata@linux.vnet.ibm.com>
Subject: [Qemu-devel] [RFC PATCH v2 16/23] cpus: Reclaim vCPU objects
To: qemu-devel@nongnu.org
Cc: Zhu Guihua, Bharata B Rao, mdroth@linux.vnet.ibm.com, agraf@suse.de,
    Chen Fan, qemu-ppc@nongnu.org, tyreld@linux.vnet.ibm.com,
    nfont@linux.vnet.ibm.com, Gu Zheng, imammedo@redhat.com, afaerber@suse.de,
    david@gibson.dropbear.id.au

From: Gu Zheng

KVM provides no way to destroy a vCPU once it has been created, so on CPU
unplug we do not close the KVM vCPU fd. Instead we park the fd in a list and
mark the CPU as stopped, so that the fd can be reused by a later CPU hot-add
request for the same vCPU id. This is also the approach suggested by the KVM
developers:
https://www.mail-archive.com/kvm@vger.kernel.org/msg102839.html

Signed-off-by: Chen Fan
Signed-off-by: Gu Zheng
Signed-off-by: Zhu Guihua
Signed-off-by: Bharata B Rao
---
 cpus.c               | 44 ++++++++++++++++++++++++++++++++++++++++
 include/qom/cpu.h    | 11 ++++++++++
 include/sysemu/kvm.h |  1 +
 kvm-all.c            | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 kvm-stub.c           |  5 +++++
 5 files changed, 117 insertions(+), 1 deletion(-)

diff --git a/cpus.c b/cpus.c
index 0fac143..33906cd 100644
--- a/cpus.c
+++ b/cpus.c
@@ -858,6 +858,24 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     qemu_cpu_kick(cpu);
 }
 
+static void qemu_kvm_destroy_vcpu(CPUState *cpu)
+{
+    CPU_REMOVE(cpu);
+
+    if (kvm_destroy_vcpu(cpu) < 0) {
+        error_report("kvm_destroy_vcpu failed.");
+        exit(EXIT_FAILURE);
+    }
+
+    object_unparent(OBJECT(cpu));
+}
+
+static void qemu_tcg_destroy_vcpu(CPUState *cpu)
+{
+    CPU_REMOVE(cpu);
+    object_unparent(OBJECT(cpu));
+}
+
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -950,6 +968,11 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
             }
         }
         qemu_kvm_wait_io_event(cpu);
+        if (cpu->exit && !cpu_can_run(cpu)) {
+            qemu_kvm_destroy_vcpu(cpu);
+            qemu_mutex_unlock(&qemu_global_mutex);
+            return NULL;
+        }
     }
 
     return NULL;
@@ -1003,6 +1026,7 @@ static void tcg_exec_all(void);
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
+    CPUState *remove_cpu = NULL;
 
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
@@ -1039,6 +1063,16 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
             }
         }
         qemu_tcg_wait_io_event();
+        CPU_FOREACH(cpu) {
+            if (cpu->exit && !cpu_can_run(cpu)) {
+                remove_cpu = cpu;
+                break;
+            }
+        }
+        if (remove_cpu) {
+            qemu_tcg_destroy_vcpu(remove_cpu);
+            remove_cpu = NULL;
+        }
     }
 
     return NULL;
@@ -1196,6 +1230,13 @@ void resume_all_vcpus(void)
     }
 }
 
+void cpu_remove(CPUState *cpu)
+{
+    cpu->stop = true;
+    cpu->exit = true;
+    qemu_cpu_kick(cpu);
+}
+
 /* For temporary buffers for forming a name */
 #define VCPU_THREAD_NAME_SIZE 16
 
@@ -1390,6 +1431,9 @@ static void tcg_exec_all(void)
                 break;
             }
         } else if (cpu->stop || cpu->stopped) {
+            if (cpu->exit) {
+                next_cpu = CPU_NEXT(cpu);
+            }
             break;
         }
     }
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 5241cf4..1bfc3d4 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -206,6 +206,7 @@ struct kvm_run;
  * @halted: Nonzero if the CPU is in suspended state.
  * @stop: Indicates a pending stop request.
  * @stopped: Indicates the CPU has been artificially stopped.
+ * @exit: Indicates the CPU has exited due to an unplug operation.
  * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
  *           CPU and return to its top level loop.
  * @singlestep_enabled: Flags for single-stepping.
@@ -249,6 +250,7 @@ struct CPUState {
     bool created;
     bool stop;
     bool stopped;
+    bool exit;
     volatile sig_atomic_t exit_request;
     uint32_t interrupt_request;
     int singlestep_enabled;
@@ -306,6 +308,7 @@ struct CPUState {
 QTAILQ_HEAD(CPUTailQ, CPUState);
 extern struct CPUTailQ cpus;
 #define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
+#define CPU_REMOVE(cpu) QTAILQ_REMOVE(&cpus, cpu, node)
 #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
     QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
@@ -610,6 +613,14 @@ void cpu_exit(CPUState *cpu);
  */
 void cpu_resume(CPUState *cpu);
 
+/**
+ * cpu_remove:
+ * @cpu: The CPU to remove.
+ *
+ * Requests the CPU to be removed.
+ */
+void cpu_remove(CPUState *cpu);
+
 /**
  * qemu_init_vcpu:
  * @cpu: The vCPU to initialize.
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 30cb84d..560caef 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -188,6 +188,7 @@ int kvm_has_intx_set_mask(void);
 
 int kvm_init_vcpu(CPUState *cpu);
 int kvm_cpu_exec(CPUState *cpu);
+int kvm_destroy_vcpu(CPUState *cpu);
 
 #ifdef NEED_CPU_H
 
diff --git a/kvm-all.c b/kvm-all.c
index 05a79c2..46e7853 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -71,6 +71,12 @@ typedef struct KVMSlot
 
 typedef struct kvm_dirty_log KVMDirtyLog;
 
+struct KVMParkedVcpu {
+    unsigned long vcpu_id;
+    int kvm_fd;
+    QLIST_ENTRY(KVMParkedVcpu) node;
+};
+
 struct KVMState
 {
     AccelState parent_obj;
@@ -107,6 +113,7 @@ struct KVMState
     QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
     bool direct_msi;
 #endif
+    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 };
 
 #define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
@@ -247,6 +254,53 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
     return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 }
 
+int kvm_destroy_vcpu(CPUState *cpu)
+{
+    KVMState *s = kvm_state;
+    long mmap_size;
+    struct KVMParkedVcpu *vcpu = NULL;
+    int ret = 0;
+
+    DPRINTF("kvm_destroy_vcpu\n");
+
+    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
+    if (mmap_size < 0) {
+        ret = mmap_size;
+        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
+        goto err;
+    }
+
+    ret = munmap(cpu->kvm_run, mmap_size);
+    if (ret < 0) {
+        goto err;
+    }
+
+    vcpu = g_malloc0(sizeof(*vcpu));
+    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
+    vcpu->kvm_fd = cpu->kvm_fd;
+    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+err:
+    return ret;
+}
+
+static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
+{
+    struct KVMParkedVcpu *cpu;
+
+    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
+        if (cpu->vcpu_id == vcpu_id) {
+            int kvm_fd;
+
+            QLIST_REMOVE(cpu, node);
+            kvm_fd = cpu->kvm_fd;
+            g_free(cpu);
+            return kvm_fd;
+        }
+    }
+
+    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
+}
+
 int kvm_init_vcpu(CPUState *cpu)
 {
     KVMState *s = kvm_state;
@@ -255,7 +309,7 @@ int kvm_init_vcpu(CPUState *cpu)
 
     DPRINTF("kvm_init_vcpu\n");
 
-    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
+    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
     if (ret < 0) {
         DPRINTF("kvm_create_vcpu failed\n");
         goto err;
@@ -1448,6 +1502,7 @@ static int kvm_init(MachineState *ms)
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
+    QLIST_INIT(&s->kvm_parked_vcpus);
     s->vmfd = -1;
     s->fd = qemu_open("/dev/kvm", O_RDWR);
     if (s->fd == -1) {
diff --git a/kvm-stub.c b/kvm-stub.c
index 7ba90c5..79ac626 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -30,6 +30,11 @@ bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 
+int kvm_destroy_vcpu(CPUState *cpu)
+{
+    return -ENOSYS;
+}
+
 int kvm_init_vcpu(CPUState *cpu)
 {
     return -ENOSYS;
-- 
2.1.0
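
For readers unfamiliar with the fd-parking idea described above, here is a
minimal, self-contained sketch in plain C. It is not QEMU code and not part of
the patch; the names (ParkedFd, park_fd(), get_or_create_fd(), and the fake
create_new_fd() backend) are invented for illustration only. It models the
same lifecycle: a "destroy" parks the fd on a list keyed by vCPU id instead of
closing it, and a later "create" for the same id reuses the parked fd, falling
back to real creation otherwise.

    /* Illustrative sketch only -- not QEMU code.  All names are invented. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct ParkedFd {
        unsigned long id;         /* stands in for kvm_arch_vcpu_id() */
        int fd;                   /* stands in for cpu->kvm_fd */
        struct ParkedFd *next;
    } ParkedFd;

    static ParkedFd *parked_list; /* stands in for s->kvm_parked_vcpus */

    /* Pretend backend that would normally be the KVM_CREATE_VCPU ioctl. */
    static int create_new_fd(unsigned long id)
    {
        printf("creating new fd for id %lu\n", id);
        return (int)(100 + id);
    }

    /* "Destroy": do not close the fd, park it for later reuse. */
    static void park_fd(unsigned long id, int fd)
    {
        ParkedFd *p = calloc(1, sizeof(*p));
        p->id = id;
        p->fd = fd;
        p->next = parked_list;
        parked_list = p;
    }

    /* "Create": reuse a parked fd for this id if one exists. */
    static int get_or_create_fd(unsigned long id)
    {
        ParkedFd **pp;

        for (pp = &parked_list; *pp; pp = &(*pp)->next) {
            if ((*pp)->id == id) {
                ParkedFd *p = *pp;
                int fd = p->fd;

                *pp = p->next;    /* unlink from the parked list */
                free(p);
                printf("reusing parked fd %d for id %lu\n", fd, id);
                return fd;
            }
        }
        return create_new_fd(id);
    }

    int main(void)
    {
        int fd = get_or_create_fd(3);  /* first hot-add: creates */
        park_fd(3, fd);                /* hot-remove: parks, fd stays open */
        get_or_create_fd(3);           /* second hot-add: reuses parked fd */
        return 0;
    }

In the patch itself, kvm_destroy_vcpu() plays the "park" role, kvm_get_vcpu()
plays the "reuse or fall back to KVM_CREATE_VCPU" role, and the list is rooted
at kvm_parked_vcpus in KVMState.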