From: Pavel Dovgalyuk
Date: Thu, 22 Jan 2015 11:52:49 +0300
Message-ID: <20150122085249.5276.26806.stgit@PASHA-ISP.def.inno>
In-Reply-To: <20150122085127.5276.53895.stgit@PASHA-ISP.def.inno>
References: <20150122085127.5276.53895.stgit@PASHA-ISP.def.inno>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
Subject: [Qemu-devel] [RFC PATCH v8 14/21] replay: checkpoints
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, peter.crosthwaite@xilinx.com,
 alex.bennee@linaro.org, mark.burton@greensocs.com, real@ispras.ru,
 batuzovk@ispras.ru, maria.klimushenkova@ispras.ru,
 pavel.dovgaluk@ispras.ru, pbonzini@redhat.com, afaerber@suse.de,
 fred.konrad@greensocs.com

This patch introduces checkpoints that synchronize the CPU thread and the
iothread. When a checkpoint is reached, all asynchronous events from the
queue are executed.

Signed-off-by: Pavel Dovgalyuk
---
 block.c                  |   11 ++++++++++
 cpus.c                   |    7 ++++++-
 include/qemu/timer.h     |    6 ++++--
 main-loop.c              |    5 +++++
 qemu-timer.c             |   50 +++++++++++++++++++++++++++++++++++++--------
 replay/replay-internal.h |    5 ++++-
 replay/replay.c          |   36 ++++++++++++++++++++++++++++++++++
 replay/replay.h          |   21 ++++++++++++++++++++
 stubs/replay.c           |   11 ++++++++++
 vl.c                     |    4 +++-
 10 files changed, 143 insertions(+), 13 deletions(-)

diff --git a/block.c b/block.c
index cbe4a32..a4f45c3 100644
--- a/block.c
+++ b/block.c
@@ -1994,6 +1994,11 @@ void bdrv_drain_all(void)
     BlockDriverState *bs;
 
     while (busy) {
+        if (!replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
+            /* Do not wait anymore; we stopped at some place in
+               the middle of execution during replay */
+            return;
+        }
         busy = false;
 
         QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
@@ -2004,6 +2009,12 @@ void bdrv_drain_all(void)
             aio_context_release(aio_context);
         }
     }
+    if (replay_mode == REPLAY_MODE_PLAY) {
+        /* Skip checkpoints from the log */
+        while (replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
+            /* Nothing */
+        }
+    }
 }
 
 /* make a BlockDriverState anonymous by removing from bdrv_state and
diff --git a/cpus.c b/cpus.c
index 01d89aa..9c32491 100644
--- a/cpus.c
+++ b/cpus.c
@@ -388,7 +388,7 @@ void qtest_clock_warp(int64_t dest)
         timers_state.qemu_icount_bias += warp;
         seqlock_write_unlock(&timers_state.vm_clock_seqlock);
 
-        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL, false);
         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
     }
     qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -408,6 +408,11 @@ void qemu_clock_warp(QEMUClockType type)
         return;
     }
 
+    /* warp clock deterministically in record/replay mode */
+    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP)) {
+        return;
+    }
+
     /*
      * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
      * This ensures that the deadline for the timer is computed correctly below.
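
For illustration only, not part of the patch: a minimal, self-contained sketch
of the gating pattern used in the block.c and cpus.c hunks above. A blocking
operation is entered only if the checkpoint can be recorded or replayed; during
replay a false return means the recorded execution stopped here, so the caller
must not keep waiting. Everything except the replay_checkpoint() name and the
CHECKPOINT_* IDs is a hypothetical stand-in.

  #include <stdbool.h>
  #include <stdio.h>

  /* Trimmed stand-in for the declarations added by this series. */
  typedef enum { CHECKPOINT_BDRV_DRAIN, CHECKPOINT_CLOCK_WARP } ReplayCheckpoint;

  /* Pretend implementation: in PLAY mode the real function returns false
   * when the checkpoint is not found at the current position in the log. */
  static bool replay_checkpoint(ReplayCheckpoint checkpoint)
  {
      (void)checkpoint;
      return true;              /* always proceed in this standalone sketch */
  }

  /* Hypothetical caller modelled on bdrv_drain_all(): the wait loop runs
   * only while the checkpoint allows it. */
  static void drain_like_operation(void)
  {
      bool busy = true;

      while (busy) {
          if (!replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
              /* Replayed execution stopped somewhere in the middle;
               * do not wait for events that will never arrive. */
              return;
          }
          busy = false;         /* ... poll pending requests here ... */
      }
  }

  int main(void)
  {
      drain_like_operation();
      printf("drained\n");
      return 0;
  }
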
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 0c2472c..26927b0 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -240,13 +240,14 @@ void qemu_clock_unregister_reset_notifier(QEMUClockType type,
 /**
  * qemu_clock_run_timers:
  * @type: clock on which to operate
+ * @run_all: true when called from qemu_clock_run_all_timers
  *
  * Run all the timers associated with the default timer list
  * of a clock.
  *
  * Returns: true if any timer ran.
  */
-bool qemu_clock_run_timers(QEMUClockType type);
+bool qemu_clock_run_timers(QEMUClockType type, bool run_all);
 
 /**
  * qemu_clock_run_all_timers:
@@ -337,12 +338,13 @@ QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
 /**
  * timerlist_run_timers:
  * @timer_list: the timer list to use
+ * @run_all: true when called from qemu_clock_run_all_timers
  *
  * Call all expired timers associated with the timer list.
  *
  * Returns: true if any timer expired
  */
-bool timerlist_run_timers(QEMUTimerList *timer_list);
+bool timerlist_run_timers(QEMUTimerList *timer_list, bool run_all);
 
 /**
  * timerlist_notify:
diff --git a/main-loop.c b/main-loop.c
index 981bcb5..d6e93c3 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -497,6 +497,11 @@ int main_loop_wait(int nonblocking)
     slirp_pollfds_poll(gpollfds, (ret < 0));
 #endif
 
+    /* The CPU thread can wait infinitely for an event after
+       missing the warp */
+    if (replay_mode == REPLAY_MODE_PLAY) {
+        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+    }
     qemu_clock_run_all_timers();
 
     return ret;
diff --git a/qemu-timer.c b/qemu-timer.c
index bc981a2..b6eb7b3 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -465,7 +465,7 @@ bool timer_expired(QEMUTimer *timer_head, int64_t current_time)
     return timer_expired_ns(timer_head, current_time * timer_head->scale);
 }
 
-bool timerlist_run_timers(QEMUTimerList *timer_list)
+bool timerlist_run_timers(QEMUTimerList *timer_list, bool run_all)
 {
     QEMUTimer *ts;
     int64_t current_time;
@@ -473,6 +473,33 @@ bool timerlist_run_timers(QEMUTimerList *timer_list)
     QEMUTimerCB *cb;
     void *opaque;
 
+    switch (timer_list->clock->type) {
+    case QEMU_CLOCK_REALTIME:
+        break;
+    default:
+    case QEMU_CLOCK_VIRTUAL:
+        if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+            || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_VIRTUAL_ALL
+                                          : CHECKPOINT_CLOCK_VIRTUAL)) {
+            return false;
+        }
+        break;
+    case QEMU_CLOCK_HOST:
+        if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+            || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_HOST_ALL
+                                          : CHECKPOINT_CLOCK_HOST)) {
+            return false;
+        }
+        break;
+    case QEMU_CLOCK_VIRTUAL_RT:
+        if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+            || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_VIRTUAL_RT_ALL
+                                          : CHECKPOINT_CLOCK_VIRTUAL_RT)) {
+            return false;
+        }
+        break;
+    }
+
     qemu_event_reset(&timer_list->timers_done_ev);
     if (!timer_list->clock->enabled) {
         goto out;
@@ -505,9 +532,9 @@ out:
     return progress;
 }
 
-bool qemu_clock_run_timers(QEMUClockType type)
+bool qemu_clock_run_timers(QEMUClockType type, bool run_all)
 {
-    return timerlist_run_timers(main_loop_tlg.tl[type]);
+    return timerlist_run_timers(main_loop_tlg.tl[type], run_all);
 }
 
 void timerlistgroup_init(QEMUTimerListGroup *tlg,
@@ -532,7 +559,7 @@ bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg)
     QEMUClockType type;
     bool progress = false;
     for (type = 0; type < QEMU_CLOCK_MAX; type++) {
-        progress |= timerlist_run_timers(tlg->tl[type]);
+        progress |= timerlist_run_timers(tlg->tl[type], false);
     }
     return progress;
 }
@@ -541,11 +568,18 @@ int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg)
 {
     int64_t deadline = -1;
     QEMUClockType type;
+    bool play = replay_mode == REPLAY_MODE_PLAY;
     for (type = 0; type < QEMU_CLOCK_MAX; type++) {
         if (qemu_clock_use_for_deadline(tlg->tl[type]->clock->type)) {
-            deadline = qemu_soonest_timeout(deadline,
-                                            timerlist_deadline_ns(
-                                                tlg->tl[type]));
+            if (!play || tlg->tl[type]->clock->type == QEMU_CLOCK_REALTIME) {
+                deadline = qemu_soonest_timeout(deadline,
+                                                timerlist_deadline_ns(
+                                                    tlg->tl[type]));
+            } else {
+                /* Read the clock from the replay file and do not calculate
+                   the deadline based on the virtual clock. */
+                qemu_clock_get_ns(tlg->tl[type]->clock->type);
+            }
         }
     }
     return deadline;
@@ -615,7 +649,7 @@ bool qemu_clock_run_all_timers(void)
     QEMUClockType type;
 
     for (type = 0; type < QEMU_CLOCK_MAX; type++) {
-        progress |= qemu_clock_run_timers(type);
+        progress |= qemu_clock_run_timers(type, true);
     }
 
     return progress;
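
For illustration only, not part of the patch: the qemu-timer.c hunks above pick
a checkpoint ID per clock type, and the run_all flag selects the *_ALL variant
when the timers are run from qemu_clock_run_all_timers() rather than
individually. A compact stand-alone sketch of that mapping; the enums below are
local stand-ins with illustrative ordering and values, not QEMU's definitions.

  #include <stdbool.h>

  /* Local stand-ins mirroring the identifiers used above. */
  typedef enum {
      QEMU_CLOCK_REALTIME, QEMU_CLOCK_VIRTUAL, QEMU_CLOCK_HOST,
      QEMU_CLOCK_VIRTUAL_RT, QEMU_CLOCK_MAX
  } QEMUClockType;

  typedef enum {
      CHECKPOINT_CLOCK_VIRTUAL, CHECKPOINT_CLOCK_VIRTUAL_ALL,
      CHECKPOINT_CLOCK_HOST, CHECKPOINT_CLOCK_HOST_ALL,
      CHECKPOINT_CLOCK_VIRTUAL_RT, CHECKPOINT_CLOCK_VIRTUAL_RT_ALL,
      CHECKPOINT_NONE
  } ReplayCheckpoint;

  /* Map a clock type to the checkpoint recorded by timerlist_run_timers();
   * run_all selects the *_ALL variant used from qemu_clock_run_all_timers(). */
  ReplayCheckpoint clock_checkpoint(QEMUClockType type, bool run_all)
  {
      switch (type) {
      case QEMU_CLOCK_VIRTUAL:
          return run_all ? CHECKPOINT_CLOCK_VIRTUAL_ALL : CHECKPOINT_CLOCK_VIRTUAL;
      case QEMU_CLOCK_HOST:
          return run_all ? CHECKPOINT_CLOCK_HOST_ALL : CHECKPOINT_CLOCK_HOST;
      case QEMU_CLOCK_VIRTUAL_RT:
          return run_all ? CHECKPOINT_CLOCK_VIRTUAL_RT_ALL
                         : CHECKPOINT_CLOCK_VIRTUAL_RT;
      default:
          return CHECKPOINT_NONE;   /* realtime timers are not checkpointed */
      }
  }

  int main(void)
  {
      return clock_checkpoint(QEMU_CLOCK_HOST, true) == CHECKPOINT_CLOCK_HOST_ALL
             ? 0 : 1;
  }
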
diff --git a/replay/replay-internal.h b/replay/replay-internal.h
index 68b2d45..c0a5800 100755
--- a/replay/replay-internal.h
+++ b/replay/replay-internal.h
@@ -31,7 +31,10 @@ enum ReplayEvents {
     EVENT_SHUTDOWN,
     /* for clock read/writes */
     /* some of grteater codes are reserved for clocks */
-    EVENT_CLOCK
+    EVENT_CLOCK,
+    /* for checkpoint event */
+    /* some of the greater codes are reserved for checkpoints */
+    EVENT_CHECKPOINT = EVENT_CLOCK + REPLAY_CLOCK_COUNT
 };
 
 /* Asynchronous events IDs */
diff --git a/replay/replay.c b/replay/replay.c
index cfa69fa..7c4a801 100755
--- a/replay/replay.c
+++ b/replay/replay.c
@@ -163,3 +163,39 @@ void replay_shutdown_request(void)
         replay_put_event(EVENT_SHUTDOWN);
     }
 }
+
+bool replay_checkpoint(ReplayCheckpoint checkpoint)
+{
+    bool res = false;
+    replay_save_instructions();
+
+    if (replay_file) {
+        if (replay_mode == REPLAY_MODE_PLAY) {
+            replay_mutex_lock();
+            if (!skip_async_events(EVENT_CHECKPOINT + checkpoint)) {
+                if (replay_data_kind == EVENT_ASYNC) {
+                    replay_read_events(checkpoint);
+                    replay_fetch_data_kind();
+                    res = replay_data_kind != EVENT_ASYNC;
+                    replay_mutex_unlock();
+                    return res;
+                }
+                replay_mutex_unlock();
+                return res;
+            }
+            replay_has_unread_data = 0;
+            replay_read_events(checkpoint);
+            replay_fetch_data_kind();
+            res = replay_data_kind != EVENT_ASYNC;
+            replay_mutex_unlock();
+            return res;
+        } else if (replay_mode == REPLAY_MODE_RECORD) {
+            replay_mutex_lock();
+            replay_put_event(EVENT_CHECKPOINT + checkpoint);
+            replay_save_events(checkpoint);
+            replay_mutex_unlock();
+        }
+    }
+
+    return true;
+}
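
For illustration only, not part of the patch: checkpoints share the event-ID
space of the log with clock events, which is why replay-internal.h reserves
EVENT_CHECKPOINT = EVENT_CLOCK + REPLAY_CLOCK_COUNT and replay.c writes
EVENT_CHECKPOINT + checkpoint. A small sketch of that encoding; the numeric
values below are invented stand-ins, not the real ones.

  #include <assert.h>

  /* Local stand-ins; real values come from replay-internal.h / replay.h. */
  enum { REPLAY_CLOCK_COUNT = 3, CHECKPOINT_COUNT = 9 };
  enum { EVENT_CLOCK = 8, EVENT_CHECKPOINT = EVENT_CLOCK + REPLAY_CLOCK_COUNT };

  /* Encode a checkpoint ID into the single event code stored in the log. */
  static unsigned encode_checkpoint_event(unsigned checkpoint)
  {
      assert(checkpoint < CHECKPOINT_COUNT);
      return EVENT_CHECKPOINT + checkpoint;
  }

  /* Decode it again when the log is replayed. */
  static unsigned decode_checkpoint_event(unsigned event)
  {
      assert(event >= EVENT_CHECKPOINT
             && event < EVENT_CHECKPOINT + CHECKPOINT_COUNT);
      return event - EVENT_CHECKPOINT;
  }

  int main(void)
  {
      assert(decode_checkpoint_event(encode_checkpoint_event(4)) == 4);
      return 0;
  }
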
diff --git a/replay/replay.h b/replay/replay.h
index e1c5fcf..39822b4 100755
--- a/replay/replay.h
+++ b/replay/replay.h
@@ -29,6 +29,21 @@ enum ReplayClockKind {
 };
 typedef enum ReplayClockKind ReplayClockKind;
 
+/* IDs of the checkpoints */
+enum ReplayCheckpoint {
+    CHECKPOINT_BDRV_DRAIN,
+    CHECKPOINT_CLOCK_WARP,
+    CHECKPOINT_RESET_REQUESTED,
+    CHECKPOINT_CLOCK_VIRTUAL,
+    CHECKPOINT_CLOCK_VIRTUAL_ALL,
+    CHECKPOINT_CLOCK_HOST,
+    CHECKPOINT_CLOCK_HOST_ALL,
+    CHECKPOINT_CLOCK_VIRTUAL_RT,
+    CHECKPOINT_CLOCK_VIRTUAL_RT_ALL,
+    CHECKPOINT_COUNT
+};
+typedef enum ReplayCheckpoint ReplayCheckpoint;
+
 extern ReplayMode replay_mode;
 
 /* Processing the instructions */
@@ -80,6 +95,12 @@ void replay_get_timedate(struct tm *tm);
 
 /*! Called when qemu shutdown is requested. */
 void replay_shutdown_request(void);
+/*! Should be called at checkpoints in the execution.
+    These checkpoints are skipped if they were not met.
+    Saves the checkpoint in RECORD mode and validates it in PLAY mode.
+    Returns false in PLAY mode when the checkpoint was not found.
+    Returns true in all other cases. */
+bool replay_checkpoint(ReplayCheckpoint checkpoint);
 
 /* Asynchronous events queue */
 
diff --git a/stubs/replay.c b/stubs/replay.c
index 121bca6..1be3575 100755
--- a/stubs/replay.c
+++ b/stubs/replay.c
@@ -1,4 +1,5 @@
 #include "replay/replay.h"
+#include "sysemu/sysemu.h"
 
 ReplayMode replay_mode;
 
@@ -10,3 +11,13 @@ int64_t replay_read_clock(unsigned int kind)
 {
     return 0;
 }
+
+bool replay_checkpoint(ReplayCheckpoint checkpoint)
+{
+    return true;
+}
+
+int runstate_is_running(void)
+{
+    return 0;
+}
diff --git a/vl.c b/vl.c
index 905ea8a..86ba385 100644
--- a/vl.c
+++ b/vl.c
@@ -1767,7 +1767,9 @@ static bool main_loop_should_exit(void)
             return true;
         }
     }
-    if (qemu_reset_requested()) {
+    if (qemu_reset_requested_get()
+        && replay_checkpoint(CHECKPOINT_RESET_REQUESTED)) {
+        qemu_reset_requested();
         pause_all_vcpus();
         cpu_synchronize_all_states();
         qemu_system_reset(VMRESET_REPORT);
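
Finally, an illustrative usage sketch (not part of the patch) of the contract
documented for replay_checkpoint() above, modelled loosely on the vl.c hunk:
the reset request is only acted upon when the checkpoint allows it. The
replay_checkpoint() body here is a simplified stand-in that always lets the
caller proceed, as the stub and the REPLAY_MODE_NONE path do; in PLAY mode the
real function can return false, and the reset would then be deferred.

  #include <stdbool.h>
  #include <stdio.h>

  /* Local stand-in for the declarations used by the vl.c hunk above. */
  typedef enum { CHECKPOINT_RESET_REQUESTED } ReplayCheckpoint;

  static bool reset_flag;

  /* Simplified stand-in for replay_checkpoint(). */
  static bool replay_checkpoint(ReplayCheckpoint checkpoint)
  {
      (void)checkpoint;
      return true;
  }

  /* Modelled on the main_loop_should_exit() change: the pending reset is
   * consumed only when the checkpoint says this is the right moment. */
  static void maybe_handle_reset(void)
  {
      if (reset_flag && replay_checkpoint(CHECKPOINT_RESET_REQUESTED)) {
          reset_flag = false;       /* consume the request */
          printf("system reset performed\n");
      }
  }

  int main(void)
  {
      reset_flag = true;
      maybe_handle_reset();
      return 0;
  }
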