Message-Id: <20090422192119.991668223@localhost.localdomain>
References: <20090422191504.975476933@localhost.localdomain>
Date: Wed, 22 Apr 2009 16:15:10 -0300
From: Marcelo Tosatti <mtosatti@redhat.com>
Content-Disposition: inline; filename=refactor-main-loop
Subject: [Qemu-devel] [patch 06/14] qemu: refactor main_loop
List-Id: qemu-devel.nongnu.org
To: qemu-devel@nongnu.org, aliguori@us.ibm.com
Cc: Marcelo Tosatti <mtosatti@redhat.com>

Break the main loop into three main functions: qemu_cpu_exec(),
qemu_calculate_timeout() and vm_can_run().

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu-iothread-4/vl.c
===================================================================
--- qemu-iothread-4.orig/vl.c
+++ qemu-iothread-4/vl.c
@@ -266,7 +266,7 @@ struct drive_opt drives_opt[MAX_DRIVES];
 
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
 /* Conversion factor from emulated instructions to virtual clock ticks.  */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
@@ -1350,7 +1350,7 @@ static void host_alarm_handler(int host_
             }
 #endif
         }
-        event_pending = 1;
+        timer_alarm_pending = 1;
         qemu_notify_event();
     }
 }
@@ -3811,153 +3811,175 @@ void main_loop_wait(int timeout)
 }
 
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
 {
-    int ret, timeout;
+    int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-    CPUState *env;
 
-    cur_cpu = first_cpu;
-    next_cpu = cur_cpu->next_cpu ?: first_cpu;
-    for(;;) {
-        if (vm_running) {
-
-            for(;;) {
-                /* get next cpu */
-                env = next_cpu;
 #ifdef CONFIG_PROFILER
-                ti = profile_getclock();
+    ti = profile_getclock();
 #endif
-                if (use_icount) {
-                    int64_t count;
-                    int decr;
-                    qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-                    env->icount_decr.u16.low = 0;
-                    env->icount_extra = 0;
-                    count = qemu_next_deadline();
-                    count = (count + (1 << icount_time_shift) - 1)
-                            >> icount_time_shift;
-                    qemu_icount += count;
-                    decr = (count > 0xffff) ? 0xffff : count;
-                    count -= decr;
-                    env->icount_decr.u16.low = decr;
-                    env->icount_extra = count;
-                }
-                ret = cpu_exec(env);
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+        env->icount_decr.u16.low = 0;
+        env->icount_extra = 0;
+        count = qemu_next_deadline();
+        count = (count + (1 << icount_time_shift) - 1)
+                >> icount_time_shift;
+        qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        env->icount_decr.u16.low = decr;
+        env->icount_extra = count;
+    }
+    ret = cpu_exec(env);
 #ifdef CONFIG_PROFILER
-                qemu_time += profile_getclock() - ti;
+    qemu_time += profile_getclock() - ti;
 #endif
-                if (use_icount) {
-                    /* Fold pending instructions back into the
-                       instruction counter, and clear the interrupt flag.  */
-                    qemu_icount -= (env->icount_decr.u16.low
-                                    + env->icount_extra);
-                    env->icount_decr.u32 = 0;
-                    env->icount_extra = 0;
-                }
-                next_cpu = env->next_cpu ?: first_cpu;
-                if (event_pending && likely(ret != EXCP_DEBUG)) {
-                    ret = EXCP_INTERRUPT;
-                    event_pending = 0;
-                    break;
-                }
-                if (ret == EXCP_HLT) {
-                    /* Give the next CPU a chance to run.  */
-                    cur_cpu = env;
-                    continue;
-                }
-                if (ret != EXCP_HALTED)
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag.  */
+        qemu_icount -= (env->icount_decr.u16.low
+                        + env->icount_extra);
+        env->icount_decr.u32 = 0;
+        env->icount_extra = 0;
+    }
+    return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(void)
+{
+    CPUState *env;
+
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
+
+static int qemu_calculate_timeout(void)
+{
+    int timeout;
+
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount)
+        timeout = 5000;
+    else {
+     /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event.  */
+        if (use_icount == 1) {
+            /* When not using an adaptive execution frequency
+               we tend to get badly out of sync with real time,
+               so just delay for a reasonable amount of time.  */
+            delta = 0;
+        } else {
+            delta = cpu_get_icount() - cpu_get_clock();
+        }
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO.  */
+            timeout = (delta / 1000000) + 1;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event.  */
+            add = qemu_next_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead.  */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            add = (add + (1 << icount_time_shift) - 1)
+                  >> icount_time_shift;
+            qemu_icount += add;
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
+        }
+    }
+
+    return timeout;
+}
+
+static int vm_can_run(void)
+{
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    return 1;
+}
+
+static void main_loop(void)
+{
+    int ret = 0;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+    for (;;) {
+        do {
+            if (next_cpu == NULL)
+                next_cpu = first_cpu;
+            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+                CPUState *env = cur_cpu = next_cpu;
+
+                if (!vm_running)
                     break;
-                /* all CPUs are halted ? */
-                if (env == cur_cpu)
+                if (timer_alarm_pending) {
+                    timer_alarm_pending = 0;
                     break;
-            }
-            cur_cpu = env;
-
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
                 }
-                else
+                ret = qemu_cpu_exec(env);
+                if (ret == EXCP_DEBUG) {
+                    gdb_set_stop_cpu(env);
                     break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-                qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
-            if (unlikely(ret == EXCP_DEBUG)) {
-                gdb_set_stop_cpu(cur_cpu);
-                vm_stop(EXCP_DEBUG);
-            }
-            /* If all cpus are halted then wait until the next IRQ */
-            /* XXX: use timeout computed from timers */
-            if (ret == EXCP_HALTED) {
-                if (use_icount) {
-                    int64_t add;
-                    int64_t delta;
-                    /* Advance virtual time to the next event.  */
-                    if (use_icount == 1) {
-                        /* When not using an adaptive execution frequency
-                           we tend to get badly out of sync with real time,
-                           so just delay for a reasonable amount of time.  */
-                        delta = 0;
-                    } else {
-                        delta = cpu_get_icount() - cpu_get_clock();
-                    }
-                    if (delta > 0) {
-                        /* If virtual time is ahead of real time then just
-                           wait for IO.  */
-                        timeout = (delta / 1000000) + 1;
-                    } else {
-                        /* Wait for either IO to occur or the next
-                           timer event.  */
-                        add = qemu_next_deadline();
-                        /* We advance the timer before checking for IO.
-                           Limit the amount we advance so that early IO
-                           activity won't get the guest too far ahead.  */
-                        if (add > 10000000)
-                            add = 10000000;
-                        delta += add;
-                        add = (add + (1 << icount_time_shift) - 1)
-                              >> icount_time_shift;
-                        qemu_icount += add;
-                        timeout = delta / 1000000;
-                        if (timeout < 0)
-                            timeout = 0;
-                    }
-                } else {
-                    timeout = 5000;
                 }
-            } else {
-                timeout = 0;
             }
-        } else {
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
-            timeout = 5000;
-        }
 #ifdef CONFIG_PROFILER
-        ti = profile_getclock();
+            ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+            main_loop_wait(qemu_calculate_timeout());
 #ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
+            dev_time += profile_getclock() - ti;
 #endif
+        } while (ret != EXCP_DEBUG && vm_can_run());
+
+        if (ret == EXCP_DEBUG)
+            vm_stop(EXCP_DEBUG);
+
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else
+                break;
+        }
+        if (qemu_reset_requested())
+            qemu_system_reset();
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
     }
-    cpu_disable_ticks();
-    return ret;
 }
 
 static void version(void)
--
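
For reviewers, the control flow this patch introduces can be exercised with the
stand-alone sketch below. It is illustrative only and not part of the patch:
the toy_* names are invented, timer work and main_loop_wait()/
qemu_calculate_timeout() are stubbed out, and a shutdown request is simulated
after four passes.

/* Toy model of the refactored loop structure; not qemu code. */
#include <stdio.h>

#define TOY_EXCP_DEBUG 1

struct toy_cpu { int id; struct toy_cpu *next_cpu; };

static struct toy_cpu cpus[2] = { { 0, &cpus[1] }, { 1, NULL } };
static struct toy_cpu *first_cpu = &cpus[0];
static struct toy_cpu *next_cpu;
static int timer_alarm_pending = 1;
static int shutdown_requested;

static int toy_cpu_exec(struct toy_cpu *env)
{
    /* stands in for qemu_cpu_exec(): run one vcpu for a while */
    printf("running vcpu %d\n", env->id);
    return 0;
}

static int toy_vm_can_run(void)
{
    /* mirrors vm_can_run(): no shutdown/reset/powerdown pending */
    return !shutdown_requested;
}

static void toy_main_loop(void)
{
    int ret = 0;
    int passes = 0;

    for (;;) {
        do {
            /* resume the round-robin scan where the previous pass
               stopped, like the do/while in the patched main_loop() */
            if (next_cpu == NULL)
                next_cpu = first_cpu;
            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
                if (timer_alarm_pending) {
                    timer_alarm_pending = 0;
                    break;      /* let timer work run before more vcpus */
                }
                ret = toy_cpu_exec(next_cpu);
                if (ret == TOY_EXCP_DEBUG)
                    break;      /* gdb_set_stop_cpu() would go here */
            }
            /* main_loop_wait(qemu_calculate_timeout()) would go here */
            if (++passes == 4)
                shutdown_requested = 1; /* simulated shutdown request */
        } while (ret != TOY_EXCP_DEBUG && toy_vm_can_run());

        if (shutdown_requested)
            break;              /* the qemu_shutdown_requested() path */
    }
}

int main(void)
{
    toy_main_loop();
    return 0;
}

As in the patched main_loop(), one sweep of the inner for loop runs each
runnable vcpu at most once, the do/while keeps iterating until a debug stop or
a pending request, and the outer for (;;) lets execution continue after
requests (such as reset) that do not terminate the VM.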