From mboxrd@z Thu Jan 1 00:00:00 1970
From: zhanghailiang
Date: Tue, 24 Nov 2015 17:25:28 +0800
Message-ID: <1448357149-17572-19-git-send-email-zhang.zhanghailiang@huawei.com>
In-Reply-To: <1448357149-17572-1-git-send-email-zhang.zhanghailiang@huawei.com>
References: <1448357149-17572-1-git-send-email-zhang.zhanghailiang@huawei.com>
MIME-Version: 1.0
Content-Type: text/plain
Subject: [Qemu-devel] [PATCH COLO-Frame v11 18/39] COLO: Flush PVM's cached RAM into SVM's memory
To: qemu-devel@nongnu.org
Cc: lizhijian@cn.fujitsu.com, quintela@redhat.com, yunhong.jiang@intel.com,
 eddie.dong@intel.com, peter.huangpeng@huawei.com, dgilbert@redhat.com,
 zhanghailiang, arei.gonglei@huawei.com, stefanha@redhat.com,
 amit.shah@redhat.com, hongyang.yang@easystack.cn

While the VM is running, the PVM may dirty some pages; at the next checkpoint
we transfer the PVM's dirty pages to the SVM and store them in the SVM's RAM
cache. So, right after a checkpoint, the content of the SVM's RAM cache is
identical to the PVM's memory.

Instead of flushing the whole content of the RAM cache into the SVM's memory,
we do this in a more efficient way: only flush the pages that were dirtied by
the PVM since the last checkpoint. This keeps the SVM's memory the same as the
PVM's.

Besides, we must make sure the RAM cache is flushed before the device state is
loaded.

Signed-off-by: zhanghailiang
Signed-off-by: Li Zhijian
Signed-off-by: Gonglei
---
v11:
- Move the place of 'need_flush' (Dave's suggestion)
- Remove the unused 'DPRINTF("Flush ram_cache\n")'
v10:
- Trace the number of dirty pages that are received.
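
The flush scheme described above reduces to the following stand-alone sketch
(illustration only, not QEMU code; the names dirty_bitmap, ram_cache, svm_ram,
NR_PAGES and PAGE_SIZE are made up for the example): walk a dirty bitmap and
copy only the dirtied pages from the cache into the destination memory,
clearing each bit as it is flushed.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NR_PAGES  8

static uint8_t svm_ram[NR_PAGES][PAGE_SIZE];   /* stands in for the SVM's real RAM    */
static uint8_t ram_cache[NR_PAGES][PAGE_SIZE]; /* stands in for the cached PVM pages  */
static bool dirty_bitmap[NR_PAGES];            /* pages dirtied since last checkpoint */

/* Copy only the dirty pages and clear each bit as it is flushed. */
static unsigned flush_ram_cache(void)
{
    unsigned flushed = 0;

    for (unsigned i = 0; i < NR_PAGES; i++) {
        if (dirty_bitmap[i]) {
            memcpy(svm_ram[i], ram_cache[i], PAGE_SIZE);
            dirty_bitmap[i] = false;
            flushed++;
        }
    }
    return flushed;
}

int main(void)
{
    /* Pretend the PVM dirtied two pages since the last checkpoint. */
    memset(ram_cache[1], 0xaa, PAGE_SIZE);
    memset(ram_cache[5], 0x55, PAGE_SIZE);
    dirty_bitmap[1] = dirty_bitmap[5] = true;

    printf("flushed %u pages\n", flush_ram_cache());
    return 0;
}

The real colo_flush_ram_cache() in the patch below does the same walk per
RAMBlock using migration_bitmap_find_dirty(), so only pages dirtied since the
last checkpoint are copied from block->colo_cache into block->host.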
---
 include/migration/migration.h |  1 +
 migration/colo.c              |  2 --
 migration/ram.c               | 37 +++++++++++++++++++++++++++++++++++++
 trace-events                  |  1 +
 4 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/include/migration/migration.h b/include/migration/migration.h
index e41372d..221176b 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -336,4 +336,5 @@ PostcopyState postcopy_state_set(PostcopyState new_state);
 /* ram cache */
 int colo_init_ram_cache(void);
 void colo_release_ram_cache(void);
+void colo_flush_ram_cache(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index 5ac8ff2..4095d97 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -385,8 +385,6 @@ void *colo_process_incoming_thread(void *opaque)
         }
         qemu_mutex_unlock_iothread();
 
-        /* TODO: flush vm state */
-
         ret = colo_ctl_put(mis->to_src_file, COLO_COMMAND_VMSTATE_LOADED, 0);
         if (ret < 0) {
             goto out;
diff --git a/migration/ram.c b/migration/ram.c
index da6bbd6..4f37144 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2448,6 +2448,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      * be atomic
      */
     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
+    bool need_flush = false;
 
     seq_iter++;
 
@@ -2482,6 +2483,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             /* After going into COLO, we should load the Page into colo_cache */
             if (ram_cache_enable) {
                 host = colo_cache_from_block_offset(block, addr);
+                need_flush = true;
             } else {
                 host = host_from_ram_block_offset(block, addr);
             }
@@ -2575,6 +2577,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     }
 
     rcu_read_unlock();
+
+    if (!ret && ram_cache_enable && need_flush) {
+        colo_flush_ram_cache();
+    }
     DPRINTF("Completed load of VM with exit code %d seq iteration "
             "%" PRIu64 "\n", ret, seq_iter);
     return ret;
@@ -2647,6 +2653,37 @@ void colo_release_ram_cache(void)
     rcu_read_unlock();
 }
 
+/*
+ * Flush content of RAM cache into SVM's memory.
+ * Only flush the pages that are dirtied by PVM or SVM or both.
+ */
+void colo_flush_ram_cache(void)
+{
+    RAMBlock *block = NULL;
+    void *dst_host;
+    void *src_host;
+    ram_addr_t offset = 0;
+
+    trace_colo_flush_ram_cache(migration_dirty_pages);
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
+    while (block) {
+        ram_addr_t ram_addr_abs;
+        offset = migration_bitmap_find_dirty(block, offset, &ram_addr_abs);
+        migration_bitmap_clear_dirty(ram_addr_abs);
+        if (offset >= block->used_length) {
+            offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
+            dst_host = block->host + offset;
+            src_host = block->colo_cache + offset;
+            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+        }
+    }
+    rcu_read_unlock();
+    assert(migration_dirty_pages == 0);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
diff --git a/trace-events b/trace-events
index f8a0959..f158d2a 100644
--- a/trace-events
+++ b/trace-events
@@ -1264,6 +1264,7 @@ migration_throttle(void) ""
 ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
 ram_postcopy_send_discard_bitmap(void) ""
 ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: %zx len: %zx"
+colo_flush_ram_cache(uint64_t dirty_pages) "dirty_pages %" PRIu64""
 
 # hw/display/qxl.c disable
 qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"
-- 
1.8.3.1