From mboxrd@z Thu Jan 1 00:00:00 1970
From: Juan Quintela <quintela@redhat.com>
Date: Fri, 21 Oct 2016 21:42:15 +0200
Message-Id: <1477078935-7182-14-git-send-email-quintela@redhat.com>
In-Reply-To: <1477078935-7182-1-git-send-email-quintela@redhat.com>
References: <1477078935-7182-1-git-send-email-quintela@redhat.com>
Subject: [Qemu-devel] [PATCH 13/13] migration: flush receive queue
To: qemu-devel@nongnu.org
Cc: amit.shah@redhat.com, dgilbert@redhat.com

Each time we sync the bitmap, there is a possibility that we receive a
page that is still being processed by a different thread.  We fix this
problem by making sure that we wait for all receiving threads to finish
their work before we proceed to the next stage.

I tried to make a migration command for it, but it doesn't work because
we sometimes sync the bitmap when we have already sent the beginning of
the section, so I just added a new page flag.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 include/migration/migration.h |  1 +
 migration/ram.c               | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/include/migration/migration.h b/include/migration/migration.h
index afdc7ec..49e2ec6 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -251,6 +251,7 @@ void migrate_multifd_send_threads_create(void);
 void migrate_multifd_send_threads_join(void);
 void migrate_multifd_recv_threads_create(void);
 void migrate_multifd_recv_threads_join(void);
+void qemu_savevm_send_multifd_flush(QEMUFile *f);
 
 void migrate_compress_threads_create(void);
 void migrate_compress_threads_join(void);
diff --git a/migration/ram.c b/migration/ram.c
index 9a20f63..bf2022e 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -69,6 +69,7 @@ static uint64_t bitmap_sync_count;
 
 /* 0x80 is reserved in migration.h start with 0x100 next */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 #define RAM_SAVE_FLAG_MULTIFD_PAGE     0x200
+#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x400
 
 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
 
@@ -398,6 +399,11 @@ void migrate_compress_threads_create(void)
 
 /* Multiple fd's */
 
+
+/* Indicates if we have synced the bitmap and we need to ensure that
+   the target has processed all previous pages */
+bool multifd_needs_flush = false;
+
 struct MultiFDSendParams {
     /* not changed */
     QemuThread thread;
@@ -713,6 +719,25 @@ static void multifd_recv_page(uint8_t *address, int fd_num)
     qemu_mutex_unlock(&params->mutex);
 }
 
+
+static int multifd_flush(void)
+{
+    int i, thread_count;
+
+    if (!migrate_multifd()) {
+        return 0;
+    }
+    thread_count = migrate_multifd_threads();
+    qemu_mutex_lock(&multifd_recv_mutex);
+    for (i = 0; i < thread_count; i++) {
+        while (!multifd_recv[i].done) {
+            qemu_cond_wait(&multifd_recv_cond, &multifd_recv_mutex);
+        }
+    }
+    qemu_mutex_unlock(&multifd_recv_mutex);
+    return 0;
+}
+
 /**
  * save_page_header: Write page header to wire
  *
@@ -729,6 +754,11 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
 {
     size_t size, len;
 
+    if (multifd_needs_flush) {
+        offset |= RAM_SAVE_FLAG_MULTIFD_FLUSH;
+        multifd_needs_flush = false;
+    }
+
     qemu_put_be64(f, offset);
     size = 8;
 
@@ -2399,6 +2429,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 
     if (!migration_in_postcopy(migrate_get_current())) {
         migration_bitmap_sync();
+        if (migrate_multifd()) {
+            multifd_needs_flush = true;
+        }
     }
 
     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -2440,6 +2473,9 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
         qemu_mutex_lock_iothread();
         rcu_read_lock();
         migration_bitmap_sync();
+        if (migrate_multifd()) {
+            multifd_needs_flush = true;
+        }
         rcu_read_unlock();
         qemu_mutex_unlock_iothread();
         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
@@ -2851,6 +2887,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
 
+        if (flags & RAM_SAVE_FLAG_MULTIFD_FLUSH) {
+            multifd_flush();
+            flags = flags & (~RAM_SAVE_FLAG_MULTIFD_FLUSH);
+        }
         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE |
                      RAM_SAVE_FLAG_MULTIFD_PAGE)) {
-- 
2.7.4
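
For readers following the synchronization logic outside the QEMU tree,
here is a minimal standalone sketch of the pattern multifd_flush()
implements above: the flushing side takes a mutex and waits on a
condition variable until every receive thread has marked itself done.
It uses plain pthreads instead of QEMU's qemu_mutex/qemu_cond wrappers,
and all names in it (recv_done, N_THREADS, worker) are illustrative,
not taken from QEMU.

/* Standalone analogue of multifd_flush(): wait, under a mutex and a
 * condition variable, until every receive thread has marked itself
 * done.  Plain pthreads stand in for QEMU's qemu_mutex/qemu_cond
 * wrappers; all names here are illustrative. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define N_THREADS 4

static pthread_mutex_t recv_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recv_cond = PTHREAD_COND_INITIALIZER;
static bool recv_done[N_THREADS];

/* Worker: stand-in for receiving one page, then mark ourselves done. */
static void *worker(void *arg)
{
    int id = *(int *)arg;

    usleep(1000 * (id + 1));          /* pretend to process a page */

    pthread_mutex_lock(&recv_mutex);
    recv_done[id] = true;
    /* Wake the flusher; it re-checks every flag under the lock. */
    pthread_cond_broadcast(&recv_cond);
    pthread_mutex_unlock(&recv_mutex);
    return NULL;
}

/* Same shape as multifd_flush(): block until all threads report done. */
static void flush_recv_threads(void)
{
    pthread_mutex_lock(&recv_mutex);
    for (int i = 0; i < N_THREADS; i++) {
        while (!recv_done[i]) {
            pthread_cond_wait(&recv_cond, &recv_mutex);
        }
    }
    pthread_mutex_unlock(&recv_mutex);
}

int main(void)
{
    pthread_t threads[N_THREADS];
    int ids[N_THREADS];

    for (int i = 0; i < N_THREADS; i++) {
        ids[i] = i;
        pthread_create(&threads[i], NULL, worker, &ids[i]);
    }
    flush_recv_threads();
    printf("all receive threads drained; safe to start the next stage\n");
    for (int i = 0; i < N_THREADS; i++) {
        pthread_join(threads[i], NULL);
    }
    return 0;
}

Spurious wakeups are harmless here because the flusher re-checks each
done flag under the lock before going back to sleep, which is the same
reason the while loop inside multifd_flush() is needed.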
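
The sender side of the patch piggybacks the flush request on the next
page header: guest addresses written by save_page_header() are page
aligned, so the low bits of the be64 offset word are free to carry
RAM_SAVE_FLAG_* bits, and ram_load() splits the word back into address
and flags.  Below is a small self-contained sketch of that encoding;
the TARGET_PAGE_* constants are illustrative 4 KiB-page stand-ins, not
QEMU's real definitions.

/* Sketch of carrying RAM_SAVE_FLAG_MULTIFD_FLUSH in the low bits of
 * the page offset word.  Constants are illustrative (4 KiB pages),
 * not the real QEMU TARGET_PAGE_* definitions. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((UINT64_C(1) << TARGET_PAGE_BITS) - 1))

#define RAM_SAVE_FLAG_MULTIFD_PAGE  0x200
#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x400

int main(void)
{
    uint64_t offset = UINT64_C(42) << TARGET_PAGE_BITS; /* page aligned */

    /* Sender (save_page_header): OR the flags into the spare low bits. */
    uint64_t wire = offset | RAM_SAVE_FLAG_MULTIFD_PAGE
                           | RAM_SAVE_FLAG_MULTIFD_FLUSH;

    /* Receiver (ram_load): split the word back apart ... */
    uint64_t flags = wire & ~TARGET_PAGE_MASK;
    uint64_t addr = wire & TARGET_PAGE_MASK;

    /* ... and strip MULTIFD_FLUSH once the flush has been performed,
     * so the normal page handling never sees the bit. */
    if (flags & RAM_SAVE_FLAG_MULTIFD_FLUSH) {
        /* multifd_flush() would run here */
        flags &= ~RAM_SAVE_FLAG_MULTIFD_FLUSH;
    }

    printf("addr=0x%" PRIx64 " flags=0x%" PRIx64 "\n", addr, flags);
    return 0;
}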