From mboxrd@z Thu Jan 1 00:00:00 1970
From: Alex Bennée
Date: Thu, 11 Aug 2016 16:24:08 +0100
Message-Id: <1470929064-4092-13-git-send-email-alex.bennee@linaro.org>
In-Reply-To: <1470929064-4092-1-git-send-email-alex.bennee@linaro.org>
References: <1470929064-4092-1-git-send-email-alex.bennee@linaro.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Subject: [Qemu-devel] [RFC v4 12/28] tcg: cpus rm tcg_exec_all()
To: mttcg@listserver.greensocs.com, qemu-devel@nongnu.org,
 fred.konrad@greensocs.com, a.rigo@virtualopensystems.com, cota@braap.org,
 bobby.prani@gmail.com, nikunj@linux.vnet.ibm.com
Cc: mark.burton@greensocs.com, pbonzini@redhat.com, jan.kiszka@siemens.com,
 serge.fdrv@gmail.com, rth@twiddle.net, peter.maydell@linaro.org,
 claudio.fontana@huawei.com, Alex Bennée, Peter Crosthwaite

In preparation for multi-threaded TCG we remove tcg_exec_all() and move
all the CPU cycling into the main thread function. When MTTCG is enabled
we shall use a separate thread function which only handles one vCPU.

Signed-off-by: Alex Bennée
Reviewed-by: Sergey Fedorov
Reviewed-by: Richard Henderson

---
v2
  - update timer calls to new API on rebase
v3
  - move tcg_cpu_exec above thread function, drop static fwd declaration
v4
  - split mechanical moves into earlier patch
  - moved unplug logic into its own function, don't break SMP boot
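
For reviewers wondering where this is heading (sketch only, not part of
this patch): the intention is that under MTTCG each vCPU gets its own
thread, so the round-robin loop below stays single-threaded-only. A
rough illustration of what such a per-vCPU thread function might look
like, reusing the helpers this patch touches; the function name and the
exact wait/kick protocol are placeholders, not existing API:

    /* Sketch only -- hypothetical per-vCPU MTTCG thread function.  */
    static void *qemu_tcg_mttcg_cpu_thread_fn(void *arg)
    {
        CPUState *cpu = arg;        /* this thread drives exactly one vCPU */

        rcu_register_thread();
        qemu_thread_get_self(cpu->thread);

        while (!cpu->unplug) {
            if (cpu_can_run(cpu)) {
                int r = tcg_cpu_exec(cpu);
                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                }
            }
            /* placeholder: sleep until there is more work for this vCPU */
            qemu_tcg_wait_io_event(cpu);
        }

        /* unplugged: tear down this vCPU and signal completion */
        qemu_tcg_destroy_vcpu(cpu);
        cpu->created = false;
        qemu_cond_signal(&qemu_cpu_cond);
        return NULL;
    }

The key difference from the loop below is that there is no
cpu = CPU_NEXT(cpu) cycling, and exit/kick handling becomes per-CPU.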
---
 cpus.c | 89 +++++++++++++++++++++++++++++++++---------------------------------
 1 file changed, 45 insertions(+), 44 deletions(-)

diff --git a/cpus.c b/cpus.c
index 0759a84..0ff51ea 100644
--- a/cpus.c
+++ b/cpus.c
@@ -69,7 +69,6 @@
 
 #endif /* CONFIG_LINUX */
 
-static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
 
@@ -1129,46 +1128,26 @@ static int tcg_cpu_exec(CPUState *cpu)
     return ret;
 }
 
-static void tcg_exec_all(void)
+/* Destroy any remaining vCPUs which have been unplugged and have
+ * finished running
+ */
+static void deal_with_unplugged_cpus(void)
 {
-    int r;
-
-    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
-    qemu_account_warp_timer();
-
-    if (next_cpu == NULL) {
-        next_cpu = first_cpu;
-    }
-    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
-        CPUState *cpu = next_cpu;
-
-        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
-                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
+    CPUState *cpu;
 
-        if (cpu_can_run(cpu)) {
-            tcg_cpu_exec_start(cpu);
-            r = tcg_cpu_exec(cpu);
-            tcg_cpu_exec_end(cpu);
-            if (r == EXCP_DEBUG) {
-                cpu_handle_guest_debug(cpu);
-                break;
-            }
-        } else if (cpu->stop || cpu->stopped) {
-            if (cpu->unplug) {
-                next_cpu = CPU_NEXT(cpu);
-            }
+    CPU_FOREACH(cpu) {
+        if (cpu->unplug && !cpu_can_run(cpu)) {
+            qemu_tcg_destroy_vcpu(cpu);
+            cpu->created = false;
+            qemu_cond_signal(&qemu_cpu_cond);
             break;
         }
     }
-
-    /* Pairs with smp_wmb in qemu_cpu_kick.  */
-    atomic_mb_set(&exit_request, 0);
 }
 
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
-    CPUState *remove_cpu = NULL;
 
     rcu_register_thread();
 
@@ -1195,8 +1174,41 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);
 
+    cpu = first_cpu;
+
     while (1) {
-        tcg_exec_all();
+        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
+        qemu_account_warp_timer();
+
+        if (!cpu) {
+            cpu = first_cpu;
+        }
+
+        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+
+            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
+                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
+
+            if (cpu_can_run(cpu)) {
+                int r;
+                tcg_cpu_exec_start(cpu);
+                r = tcg_cpu_exec(cpu);
+                tcg_cpu_exec_end(cpu);
+                if (r == EXCP_DEBUG) {
+                    cpu_handle_guest_debug(cpu);
+                    break;
+                }
+            } else if (cpu->stop || cpu->stopped) {
+                if (cpu->unplug) {
+                    cpu = CPU_NEXT(cpu);
+                }
+                break;
+            }
+
+        } /* for cpu.. */
+
+        /* Pairs with smp_wmb in qemu_cpu_kick.  */
+        atomic_mb_set(&exit_request, 0);
 
         if (use_icount) {
             int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
@@ -1206,18 +1218,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
             }
         }
         qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
-        CPU_FOREACH(cpu) {
-            if (cpu->unplug && !cpu_can_run(cpu)) {
-                remove_cpu = cpu;
-                break;
-            }
-        }
-        if (remove_cpu) {
-            qemu_tcg_destroy_vcpu(remove_cpu);
-            cpu->created = false;
-            qemu_cond_signal(&qemu_cpu_cond);
-            remove_cpu = NULL;
-        }
+        deal_with_unplugged_cpus();
     }
 
     return NULL;
-- 
2.7.4