All of lore.kernel.org
 help / color / mirror / Atom feed
From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: dgilbert@redhat.com, lvivier@redhat.com, peterx@redhat.com
Subject: [Qemu-devel] [PULL 29/41] migration: synchronize dirty bitmap for resume
Date: Wed,  9 May 2018 13:23:54 +0200	[thread overview]
Message-ID: <20180509112406.6183-30-quintela@redhat.com> (raw)
In-Reply-To: <20180509112406.6183-1-quintela@redhat.com>

From: Peter Xu <peterx@redhat.com>

This patch implements the first part of core RAM resume logic for
postcopy. ram_resume_prepare() is provided for the work.

When the migration is interrupted by a network failure, the dirty bitmap
on the source side will be meaningless, because even if a dirty bit is
cleared, it is still possible that the sent page was lost along the way
to the destination. Here, instead of continuing the migration with the
old dirty bitmap on the source, we ask the destination side to send back
its received bitmap, then invert it to use as our initial dirty bitmap.

The source-side send thread will issue the MIG_CMD_RECV_BITMAP requests,
once per ramblock, to ask for the received bitmap. On the destination side,
MIG_RP_MSG_RECV_BITMAP will be issued, along with the requested bitmap.
Data will be received on the source's return-path thread, and the main
migration thread will be notified when all the ramblock bitmaps are
synchronized.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20180502104740.12123-17-peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/migration.c  |  2 ++
 migration/migration.h  |  1 +
 migration/ram.c        | 47 ++++++++++++++++++++++++++++++++++++++++++
 migration/trace-events |  4 ++++
 4 files changed, 54 insertions(+)

diff --git a/migration/migration.c b/migration/migration.c
index b0217c4823..19ef8b05b1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2967,6 +2967,7 @@ static void migration_instance_finalize(Object *obj)
     qemu_sem_destroy(&ms->pause_sem);
     qemu_sem_destroy(&ms->postcopy_pause_sem);
     qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
+    qemu_sem_destroy(&ms->rp_state.rp_sem);
     error_free(ms->error);
 }
 
@@ -2999,6 +3000,7 @@ static void migration_instance_init(Object *obj)
 
     qemu_sem_init(&ms->postcopy_pause_sem, 0);
     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
+    qemu_sem_init(&ms->rp_state.rp_sem, 0);
 }
 
 /*
diff --git a/migration/migration.h b/migration/migration.h
index 556964d9d9..b4438ccb65 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -135,6 +135,7 @@ struct MigrationState
         QEMUFile     *from_dst_file;
         QemuThread    rp_thread;
         bool          error;
+        QemuSemaphore rp_sem;
     } rp_state;
 
     double mbps;
diff --git a/migration/ram.c b/migration/ram.c
index 5542843adc..b16eabcfb9 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -54,6 +54,7 @@
 #include "migration/block.h"
 #include "sysemu/sysemu.h"
 #include "qemu/uuid.h"
+#include "savevm.h"
 
 /***********************************************************/
 /* ram save/restore */
@@ -3364,6 +3365,38 @@ static bool ram_has_postcopy(void *opaque)
     return migrate_postcopy_ram();
 }
 
+/* Sync all the dirty bitmap with destination VM.  */
+static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
+{
+    RAMBlock *block;
+    QEMUFile *file = s->to_dst_file;
+    int ramblock_count = 0;
+
+    trace_ram_dirty_bitmap_sync_start();
+
+    RAMBLOCK_FOREACH(block) {
+        qemu_savevm_send_recv_bitmap(file, block->idstr);
+        trace_ram_dirty_bitmap_request(block->idstr);
+        ramblock_count++;
+    }
+
+    trace_ram_dirty_bitmap_sync_wait();
+
+    /* Wait until all the ramblocks' dirty bitmap synced */
+    while (ramblock_count--) {
+        qemu_sem_wait(&s->rp_state.rp_sem);
+    }
+
+    trace_ram_dirty_bitmap_sync_complete();
+
+    return 0;
+}
+
+static void ram_dirty_bitmap_reload_notify(MigrationState *s)
+{
+    qemu_sem_post(&s->rp_state.rp_sem);
+}
+
 /*
  * Read the received bitmap, revert it as the initial dirty bitmap.
  * This is only used when the postcopy migration is paused but wants
@@ -3438,12 +3471,25 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
 
     trace_ram_dirty_bitmap_reload_complete(block->idstr);
 
+    /*
+     * We succeeded to sync bitmap for current ramblock. If this is
+     * the last one to sync, we need to notify the main send thread.
+     */
+    ram_dirty_bitmap_reload_notify(s);
+
     ret = 0;
 out:
     free(le_bitmap);
     return ret;
 }
 
+static int ram_resume_prepare(MigrationState *s, void *opaque)
+{
+    RAMState *rs = *(RAMState **)opaque;
+
+    return ram_dirty_bitmap_sync_all(s, rs);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
@@ -3455,6 +3501,7 @@ static SaveVMHandlers savevm_ram_handlers = {
     .save_cleanup = ram_save_cleanup,
     .load_setup = ram_load_setup,
     .load_cleanup = ram_load_cleanup,
+    .resume_prepare = ram_resume_prepare,
 };
 
 void ram_mig_init(void)
diff --git a/migration/trace-events b/migration/trace-events
index be36fbccfe..53243e17ec 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -82,8 +82,12 @@ ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
 ram_postcopy_send_discard_bitmap(void) ""
 ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p"
 ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx"
+ram_dirty_bitmap_request(char *str) "%s"
 ram_dirty_bitmap_reload_begin(char *str) "%s"
 ram_dirty_bitmap_reload_complete(char *str) "%s"
+ram_dirty_bitmap_sync_start(void) ""
+ram_dirty_bitmap_sync_wait(void) ""
+ram_dirty_bitmap_sync_complete(void) ""
 
 # migration/migration.c
 await_return_path_close_on_source_close(void) ""
-- 
2.17.0

  parent reply	other threads:[~2018-05-09 11:25 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-05-09 11:23 [Qemu-devel] [PULL 00/41] Migration queue Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 01/41] migration: fix saving normal page even if it's been compressed Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 02/41] tests: Add migration precopy test Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 03/41] tests: Add migration xbzrle test Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 04/41] tests: Migration ppc now inlines its program Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 05/41] migration: Set error state in case of error Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 06/41] migration: Introduce multifd_recv_new_channel() Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 07/41] migration: terminate_* can be called for other threads Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 08/41] migration: Be sure all recv channels are created Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 09/41] migration: Export functions to create send channels Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 10/41] migration: Create multifd channels Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 11/41] migration: Delay start of migration main routines Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 12/41] migration: Transmit initial package through the multifd channels Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 13/41] migration: Define MultifdRecvParams sooner Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 14/41] migration: let incoming side use thread context Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 15/41] migration: new postcopy-pause state Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 16/41] migration: implement "postcopy-pause" src logic Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 17/41] migration: allow dst vm pause on postcopy Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 18/41] migration: allow src return path to pause Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 19/41] migration: allow fault thread " Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 20/41] qmp: hmp: add migrate "resume" option Juan Quintela
2018-05-09 12:57   ` Eric Blake
2018-05-09 11:23 ` [Qemu-devel] [PULL 21/41] migration: rebuild channel on source Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 22/41] migration: new state "postcopy-recover" Juan Quintela
2018-05-09 12:57   ` Eric Blake
2018-05-09 11:23 ` [Qemu-devel] [PULL 23/41] migration: wakeup dst ram-load-thread for recover Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 24/41] migration: new cmd MIG_CMD_RECV_BITMAP Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 25/41] migration: new message MIG_RP_MSG_RECV_BITMAP Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 26/41] migration: new cmd MIG_CMD_POSTCOPY_RESUME Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 27/41] migration: new message MIG_RP_MSG_RESUME_ACK Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 28/41] migration: introduce SaveVMHandlers.resume_prepare Juan Quintela
2018-05-09 11:23 ` Juan Quintela [this message]
2018-05-09 11:23 ` [Qemu-devel] [PULL 30/41] migration: setup ramstate for resume Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 31/41] migration: final handshake for the resume Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 32/41] migration: init dst in migration_object_init too Juan Quintela
2018-05-09 11:23 ` [Qemu-devel] [PULL 33/41] qmp/migration: new command migrate-recover Juan Quintela
2018-05-09 12:59   ` Eric Blake
2018-05-09 11:23 ` [Qemu-devel] [PULL 34/41] hmp/migration: add migrate_recover command Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 35/41] migration: introduce lock for to_dst_file Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 36/41] migration/qmp: add command migrate-pause Juan Quintela
2018-05-09 12:59   ` Eric Blake
2018-05-09 11:24 ` [Qemu-devel] [PULL 37/41] migration/hmp: add migrate_pause command Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 38/41] migration: update docs Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 39/41] migration: update index field when delete or qsort RDMALocalBlock Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 40/41] migration: Textual fixups for blocktime Juan Quintela
2018-05-09 11:24 ` [Qemu-devel] [PULL 41/41] Migration+TLS: Fix crash due to double cleanup Juan Quintela
2018-05-11 13:41 ` [Qemu-devel] [PULL 00/41] Migration queue Peter Maydell
2018-05-11 14:20   ` Dr. David Alan Gilbert
2018-05-11 14:22     ` Peter Maydell
2018-05-18 10:19   ` Peter Maydell
2018-05-18 10:22     ` Peter Maydell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180509112406.6183-30-quintela@redhat.com \
    --to=quintela@redhat.com \
    --cc=dgilbert@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=peterx@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.