From: Zhang Chen <zhangckid@gmail.com>
To: qemu-devel@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
	Juan Quintela <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	Jason Wang <jasowang@redhat.com>, Eric Blake <eblake@redhat.com>,
	Markus Armbruster <armbru@redhat.com>
Cc: Zhang Chen <zhangckid@gmail.com>,
	zhanghailiang <zhang.zhanghailiang@huawei.com>,
	Li Zhijian <lizhijian@cn.fujitsu.com>
Subject: [Qemu-devel] [PATCH V8 07/17] COLO: Load dirty pages into SVM's RAM cache firstly
Date: Sun,  3 Jun 2018 13:05:36 +0800	[thread overview]
Message-ID: <20180603050546.6827-8-zhangckid@gmail.com> (raw)
In-Reply-To: <20180603050546.6827-1-zhangckid@gmail.com>

We should not load the PVM's state directly into the SVM, because errors
may occur while the SVM is receiving the data, and that would break the
SVM.

We need to be sure that all the data has been received before we load the
state into the SVM, so we use extra memory to cache the data (the PVM's
RAM). The RAM cache on the secondary side starts out identical to the
SVM/PVM's memory. During each checkpoint we first write the PVM's dirty
pages into this RAM cache, so the cache matches the PVM's memory at every
checkpoint; only after we have received all of the PVM's state do we flush
the cached RAM into the SVM.
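
For illustration only (this sketch is not part of the patch;
in_colo_state() and receive_pvm_state() are hypothetical placeholders,
and the flush step is added by a later patch in this series), the
checkpoint cycle the cache enables looks roughly like this:

    /* Minimal sketch of the secondary-side checkpoint cycle.
     * in_colo_state() and receive_pvm_state() are hypothetical
     * placeholders, not actual QEMU functions. */
    static int colo_secondary_loop(void)
    {
        /* At startup the cache is a copy of the SVM's RAM, which at
         * that point still equals the PVM's RAM. */
        if (colo_init_ram_cache() < 0) {
            return -1;
        }

        while (in_colo_state()) {
            /* Dirty pages from the PVM land in block->colo_cache,
             * never in block->host, so a broken transfer cannot
             * corrupt the running SVM. */
            if (receive_pvm_state() < 0) {
                break; /* failover path */
            }
            /* Only a fully received checkpoint is flushed into the
             * SVM's live RAM (implemented later in this series). */
            colo_flush_ram_cache();
        }

        colo_release_ram_cache();
        return 0;
    }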

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Zhang Chen <zhangckid@gmail.com>
---
 include/exec/ram_addr.h |  1 +
 migration/migration.c   |  6 +++
 migration/ram.c         | 83 ++++++++++++++++++++++++++++++++++++++++-
 migration/ram.h         |  4 ++
 migration/savevm.c      |  2 +-
 5 files changed, 93 insertions(+), 3 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index cf2446a176..51ec153a57 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -27,6 +27,7 @@ struct RAMBlock {
     struct rcu_head rcu;
     struct MemoryRegion *mr;
     uint8_t *host;
+    uint8_t *colo_cache; /* For COLO, the secondary VM's RAM cache */
     ram_addr_t offset;
     ram_addr_t used_length;
     ram_addr_t max_length;
diff --git a/migration/migration.c b/migration/migration.c
index 48e183a54e..0d3e2e6d66 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -416,6 +416,10 @@ static void process_incoming_migration_co(void *opaque)
             error_report_err(local_err);
             exit(EXIT_FAILURE);
         }
+        if (colo_init_ram_cache() < 0) {
+            error_report("Init ram cache failed");
+            exit(EXIT_FAILURE);
+        }
         mis->migration_incoming_co = qemu_coroutine_self();
         qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
              colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
@@ -424,6 +428,8 @@ static void process_incoming_migration_co(void *opaque)
 
         /* Wait checkpoint incoming thread exit before free resource */
         qemu_thread_join(&mis->colo_incoming_thread);
+        /* We hold the global iothread lock, so it is safe here */
+        colo_release_ram_cache();
     }
 
     if (ret < 0) {
diff --git a/migration/ram.c b/migration/ram.c
index c53e8369a3..2bcd70659f 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2820,6 +2820,20 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
     return block->host + offset;
 }
 
+static inline void *colo_cache_from_block_offset(RAMBlock *block,
+                                                 ram_addr_t offset)
+{
+    if (!offset_in_ramblock(block, offset)) {
+        return NULL;
+    }
+    if (!block->colo_cache) {
+        error_report("%s: colo_cache is NULL in block :%s",
+                     __func__, block->idstr);
+        return NULL;
+    }
+    return block->colo_cache + offset;
+}
+
 /**
  * ram_handle_compressed: handle the zero page case
  *
@@ -3024,6 +3038,58 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
+/*
+ * colo cache: this is for the secondary VM; we cache the whole
+ * memory of the secondary VM, and the global lock must be held
+ * when calling this helper.
+ */
+int colo_init_ram_cache(void)
+{
+    RAMBlock *block;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        block->colo_cache = qemu_anon_ram_alloc(block->used_length,
+                                                NULL,
+                                                false);
+        if (!block->colo_cache) {
+            error_report("%s: Can't alloc memory for COLO cache of block %s,"
+                         "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
+                         block->used_length);
+            goto out_locked;
+        }
+        memcpy(block->colo_cache, block->host, block->used_length);
+    }
+    rcu_read_unlock();
+    return 0;
+
+out_locked:
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->colo_cache) {
+            qemu_anon_ram_free(block->colo_cache, block->used_length);
+            block->colo_cache = NULL;
+        }
+    }
+
+    rcu_read_unlock();
+    return -errno;
+}
+
+/* The global lock must be held when calling this helper */
+void colo_release_ram_cache(void)
+{
+    RAMBlock *block;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->colo_cache) {
+            qemu_anon_ram_free(block->colo_cache, block->used_length);
+            block->colo_cache = NULL;
+        }
+    }
+    rcu_read_unlock();
+}
+
 /**
  * ram_load_setup: Setup RAM for migration incoming side
  *
@@ -3040,6 +3106,7 @@ static int ram_load_setup(QEMUFile *f, void *opaque)
 
     xbzrle_load_setup();
     ramblock_recv_map_init();
+
     return 0;
 }
 
@@ -3053,6 +3120,7 @@ static int ram_load_cleanup(void *opaque)
         g_free(rb->receivedmap);
         rb->receivedmap = NULL;
     }
+
     return 0;
 }
 
@@ -3286,13 +3354,24 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);
 
-            host = host_from_ram_block_offset(block, addr);
+            /*
+             * After going into COLO state, we should load pages into colo_cache.
+             */
+            if (migration_incoming_in_colo_state()) {
+                host = colo_cache_from_block_offset(block, addr);
+            } else {
+                host = host_from_ram_block_offset(block, addr);
+            }
             if (!host) {
                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
                 break;
             }
-            ramblock_recv_bitmap_set(block, host);
+
+            if (!migration_incoming_in_colo_state()) {
+                ramblock_recv_bitmap_set(block, host);
+            }
+
             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
         }
 
diff --git a/migration/ram.h b/migration/ram.h
index d386f4d641..d5e81d4d48 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -70,4 +70,8 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                   const char *block_name);
 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
 
+/* ram cache */
+int colo_init_ram_cache(void);
+void colo_release_ram_cache(void);
+
 #endif
diff --git a/migration/savevm.c b/migration/savevm.c
index 308f753013..4a789eb4c9 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1929,7 +1929,7 @@ static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
 static int loadvm_process_enable_colo(MigrationIncomingState *mis)
 {
     migration_incoming_enable_colo();
-    return 0;
+    return colo_init_ram_cache();
 }
 
 /*
-- 
2.17.GIT
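
As a reading aid, and again not part of this patch: the companion
flush step ("COLO: Flush memory data from ram cache", later in this
series) conceptually copies cached pages back into the SVM's live RAM.
A naive sketch, assuming every page is copied rather than walking the
dirty bitmap the real code uses:

    /* Illustrative only -- the real flush walks the dirty bitmap
     * recorded at receive time instead of copying every page. */
    static void colo_flush_ram_cache_sketch(void)
    {
        RAMBlock *block;
        ram_addr_t offset;

        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            for (offset = 0; offset < block->used_length;
                 offset += TARGET_PAGE_SIZE) {
                memcpy(block->host + offset,
                       block->colo_cache + offset, TARGET_PAGE_SIZE);
            }
        }
        rcu_read_unlock();
    }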


Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180603050546.6827-8-zhangckid@gmail.com \
    --to=zhangckid@gmail.com \
    --cc=armbru@redhat.com \
    --cc=dgilbert@redhat.com \
    --cc=eblake@redhat.com \
    --cc=jasowang@redhat.com \
    --cc=lizhijian@cn.fujitsu.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    --cc=zhang.zhanghailiang@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.