From: guangrong.xiao@gmail.com
To: pbonzini@redhat.com, mst@redhat.com, mtosatti@redhat.com
Cc: Xiao Guangrong <xiaoguangrong@tencent.com>,
	qemu-devel@nongnu.org, kvm@vger.kernel.org
Subject: [PATCH 4/8] migration: introduce control_save_page()
Date: Tue, 13 Mar 2018 15:57:35 +0800	[thread overview]
Message-ID: <20180313075739.11194-5-xiaoguangrong@tencent.com>
In-Reply-To: <20180313075739.11194-1-xiaoguangrong@tencent.com>

From: Xiao Guangrong <xiaoguangrong@tencent.com>

Abstract the common logic into control_save_page() to clean up the code;
no logic is changed.
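
Both call sites previously open-coded the same sequence around
ram_control_save_page(); after this patch it collapses to a single call.
A condensed before/after view, distilled from the diff below (no new
behavior):

    /* before, duplicated in ram_save_page() and
     * ram_save_compressed_page(): */
    bytes_xmit = 0;
    ret = ram_control_save_page(rs->f, block->offset, offset,
                                TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        /* ... per-caller accounting of normal/duplicate ... */
    }

    /* after: */
    if (control_save_page(rs, block, offset, &pages)) {
        return pages;
    }
    /* otherwise fall through to the zero-page/xbzrle/compression paths */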

Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
---
 migration/ram.c | 174 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 89 insertions(+), 85 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index c47185d38c..e7b8b14c3c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -957,6 +957,44 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
     ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
 }
 
+/*
+ * @pages: the number of pages written by the control path,
+ *        < 0 - error
+ *        > 0 - number of pages written
+ *
+ * Return true if the page has been saved, otherwise return false.
+ */
+static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
+                              int *pages)
+{
+    uint64_t bytes_xmit = 0;
+    int ret;
+
+    *pages = -1;
+    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
+                                &bytes_xmit);
+    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
+        return false;
+    }
+
+    if (bytes_xmit) {
+        ram_counters.transferred += bytes_xmit;
+        *pages = 1;
+    }
+
+    if (ret == RAM_SAVE_CONTROL_DELAYED) {
+        return true;
+    }
+
+    if (bytes_xmit > 0) {
+        ram_counters.normal++;
+    } else if (bytes_xmit == 0) {
+        ram_counters.duplicate++;
+    }
+
+    return true;
+}
+
 /**
  * ram_save_page: send the given page to the stream
  *
@@ -973,56 +1011,36 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 {
     int pages = -1;
-    uint64_t bytes_xmit;
-    ram_addr_t current_addr;
     uint8_t *p;
-    int ret;
     bool send_async = true;
     RAMBlock *block = pss->block;
     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
+    ram_addr_t current_addr = block->offset + offset;
 
     p = block->host + offset;
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
-    /* In doubt sent page as normal */
-    bytes_xmit = 0;
-    ret = ram_control_save_page(rs->f, block->offset,
-                           offset, TARGET_PAGE_SIZE, &bytes_xmit);
-    if (bytes_xmit) {
-        ram_counters.transferred += bytes_xmit;
-        pages = 1;
+    if (control_save_page(rs, block, offset, &pages)) {
+        return pages;
     }
 
     XBZRLE_cache_lock();
-
-    current_addr = block->offset + offset;
-
-    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
-        if (ret != RAM_SAVE_CONTROL_DELAYED) {
-            if (bytes_xmit > 0) {
-                ram_counters.normal++;
-            } else if (bytes_xmit == 0) {
-                ram_counters.duplicate++;
-            }
-        }
-    } else {
-        pages = save_zero_page(rs, block, offset);
-        if (pages > 0) {
-            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
-             * page would be stale
+    pages = save_zero_page(rs, block, offset);
+    if (pages > 0) {
+        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
+         * page would be stale
+         */
+        xbzrle_cache_zero_page(rs, current_addr);
+        ram_release_pages(block->idstr, offset, pages);
+    } else if (!rs->ram_bulk_stage &&
+               !migration_in_postcopy() && migrate_use_xbzrle()) {
+        pages = save_xbzrle_page(rs, &p, current_addr, block,
+                                 offset, last_stage);
+        if (!last_stage) {
+            /* Can't send this cached data async, since the cache page
+             * might get updated before it gets to the wire
              */
-            xbzrle_cache_zero_page(rs, current_addr);
-            ram_release_pages(block->idstr, offset, pages);
-        } else if (!rs->ram_bulk_stage &&
-                   !migration_in_postcopy() && migrate_use_xbzrle()) {
-            pages = save_xbzrle_page(rs, &p, current_addr, block,
-                                     offset, last_stage);
-            if (!last_stage) {
-                /* Can't send this cached data async, since the cache page
-                 * might get updated before it gets to the wire
-                 */
-                send_async = false;
-            }
+            send_async = false;
         }
     }
 
@@ -1152,63 +1170,49 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
                                     bool last_stage)
 {
     int pages = -1;
-    uint64_t bytes_xmit = 0;
     uint8_t *p;
-    int ret;
     RAMBlock *block = pss->block;
     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
 
     p = block->host + offset;
 
-    ret = ram_control_save_page(rs->f, block->offset,
-                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
-    if (bytes_xmit) {
-        ram_counters.transferred += bytes_xmit;
-        pages = 1;
+    if (control_save_page(rs, block, offset, &pages)) {
+        return pages;
     }
-    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
-        if (ret != RAM_SAVE_CONTROL_DELAYED) {
-            if (bytes_xmit > 0) {
-                ram_counters.normal++;
-            } else if (bytes_xmit == 0) {
-                ram_counters.duplicate++;
-            }
+
+    /* When starting the process of a new block, the first page of
+     * the block should be sent out before other pages in the same
+     * block, and all the pages in the last block should have been
+     * sent out; keeping this order is important, because the 'cont'
+     * flag is used to avoid resending the block name.
+     */
+    if (block != rs->last_sent_block) {
+        flush_compressed_data(rs);
+        pages = save_zero_page(rs, block, offset);
+        if (pages > 0) {
+            ram_release_pages(block->idstr, offset, pages);
+        } else {
+            /*
+             * Make sure the first page is sent out before other pages.
+             *
+             * We post it as a normal page, as compression would take
+             * too much CPU resource.
+             */
+            ram_counters.transferred += save_page_header(rs, rs->f, block,
+                                            offset | RAM_SAVE_FLAG_PAGE);
+            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
+                                  migrate_release_ram() &
+                                  migration_in_postcopy());
+            ram_counters.transferred += TARGET_PAGE_SIZE;
+            ram_counters.normal++;
+            pages = 1;
         }
     } else {
-        /* When starting the process of a new block, the first page of
-         * the block should be sent out before other pages in the same
-         * block, and all the pages in last block should have been sent
-         * out, keeping this order is important, because the 'cont' flag
-         * is used to avoid resending the block name.
-         */
-        if (block != rs->last_sent_block) {
-            flush_compressed_data(rs);
-            pages = save_zero_page(rs, block, offset);
-            if (pages > 0) {
-                ram_release_pages(block->idstr, offset, pages);
-            } else {
-                /*
-                 * Make sure the first page is sent out before other pages.
-                 *
-                 * we post it as normal page as compression will take much
-                 * CPU resource.
-                 */
-                ram_counters.transferred += save_page_header(rs, rs->f, block,
-                                                offset | RAM_SAVE_FLAG_PAGE);
-                qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
-                                      migrate_release_ram() &
-                                      migration_in_postcopy());
-                ram_counters.transferred += TARGET_PAGE_SIZE;
-                ram_counters.normal++;
-                pages = 1;
-            }
+        pages = save_zero_page(rs, block, offset);
+        if (pages == -1) {
+            pages = compress_page_with_multi_thread(rs, block, offset);
         } else {
-            pages = save_zero_page(rs, block, offset);
-            if (pages == -1) {
-                pages = compress_page_with_multi_thread(rs, block, offset);
-            } else {
-                ram_release_pages(block->idstr, offset, pages);
-            }
+            ram_release_pages(block->idstr, offset, pages);
         }
     }
 
-- 
2.14.3
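
For readers following along outside the QEMU tree, the decision table in
control_save_page() can be exercised in isolation. The sketch below is a
toy reimplementation of the accounting only; the enum values and the
counter struct are stand-ins for the real definitions in
migration/qemu-file.h and migration/ram.c, not the actual implementation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the QEMU definitions (values assumed; see
     * migration/qemu-file.h for the real ones). */
    enum { RAM_SAVE_CONTROL_NOT_SUPP = -1000,
           RAM_SAVE_CONTROL_DELAYED  = -2000 };
    static struct { uint64_t transferred, normal, duplicate; } ram_counters;

    /* Mirrors the accounting in control_save_page(): ret and bytes_xmit
     * come from the control path, *pages reports the outcome. */
    static bool control_save_page_sim(int ret, uint64_t bytes_xmit,
                                      int *pages)
    {
        *pages = -1;
        if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
            return false;             /* fall back to the normal save paths */
        }
        if (bytes_xmit) {
            ram_counters.transferred += bytes_xmit;
            *pages = 1;
        }
        if (ret == RAM_SAVE_CONTROL_DELAYED) {
            return true;              /* in flight; accounted on completion */
        }
        if (bytes_xmit > 0) {
            ram_counters.normal++;    /* a full page went over the wire */
        } else {
            ram_counters.duplicate++; /* control path saw a zero page */
        }
        return true;
    }

    int main(void)
    {
        int pages;
        bool handled;

        handled = control_save_page_sim(RAM_SAVE_CONTROL_NOT_SUPP, 0, &pages);
        printf("not supported: handled=%d\n", handled);

        handled = control_save_page_sim(0, 4096, &pages);
        printf("sent 4K:       handled=%d pages=%d normal=%llu\n",
               handled, pages, (unsigned long long)ram_counters.normal);

        handled = control_save_page_sim(RAM_SAVE_CONTROL_DELAYED, 0, &pages);
        printf("delayed:       handled=%d pages=%d\n", handled, pages);
        return 0;
    }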

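Two points about the surrounding logic that this patch moves but does not
change (paraphrasing the in-tree comments):

* In ram_save_page(), a page detected as zero must also be pushed into the
  XBZRLE cache via xbzrle_cache_zero_page(); otherwise a stale, non-zero
  cached copy could later be used as the delta base. Likewise, a page
  served from the XBZRLE cache is sent synchronously during the live phase
  (send_async = false), since the cached copy may be rewritten before the
  buffer reaches the wire.

* In ram_save_compressed_page(), save_page_header() embeds the block name
  only when the 'cont' flag (RAM_SAVE_FLAG_CONTINUE in migration/ram.c) is
  not set. Compression worker threads complete out of order, so pending
  compressed pages are flushed and the first page of a new block goes out
  synchronously as a normal page, guaranteeing that the record naming the
  block reaches the wire before any record relying on 'cont'.
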
Thread overview: 126+ messages
2018-03-13  7:57 [PATCH 0/8] migration: improve and cleanup compression guangrong.xiao
2018-03-13  7:57 ` [PATCH 1/8] migration: stop compressing page in migration thread guangrong.xiao
2018-03-15 10:25   ` Dr. David Alan Gilbert
2018-03-16  8:05     ` Xiao Guangrong
2018-03-19 12:11       ` Dr. David Alan Gilbert
2018-03-21  8:19       ` Peter Xu
2018-03-22 11:38         ` Xiao Guangrong
2018-03-26  9:02           ` Peter Xu
2018-03-26 15:43             ` Xiao Guangrong
2018-03-27  7:33               ` Peter Xu
2018-03-27 19:12               ` Dr. David Alan Gilbert
2018-03-28  3:01   ` Wang, Wei W
2018-03-27 15:24     ` Xiao Guangrong
2018-03-28  7:30       ` Wei Wang
2018-03-28  7:37         ` Peter Xu
2018-03-28  8:30           ` Wei Wang
2018-03-13  7:57 ` [PATCH 2/8] migration: stop allocating and freeing memory frequently guangrong.xiao
2018-03-15 11:03   ` Dr. David Alan Gilbert
2018-03-16  8:19     ` Xiao Guangrong
2018-03-19 10:54       ` Dr. David Alan Gilbert
2018-03-19 12:11         ` Xiao Guangrong
2018-03-19  1:49   ` [PATCH 2/8] migration: stop allocating and freeing memory frequently jiang.biao2
2018-03-19  4:03     ` Xiao Guangrong
2018-03-19  4:48       ` [PATCH 2/8] migration: stop allocating and freeing memory frequently jiang.biao2
2018-03-21  9:06   ` [PATCH 2/8] migration: stop allocating and freeing memory frequently Peter Xu
2018-03-22 11:57     ` Xiao Guangrong
2018-03-27  7:07       ` Peter Xu
2018-03-13  7:57 ` [PATCH 3/8] migration: support to detect compression and decompression errors guangrong.xiao
2018-03-15 11:29   ` Dr. David Alan Gilbert
2018-03-16  8:25     ` Xiao Guangrong
2018-03-19  7:56   ` [PATCH 3/8] migration: support to detect compression and " jiang.biao2
2018-03-19  8:01     ` Xiao Guangrong
2018-03-21 10:00   ` [PATCH 3/8] migration: support to detect compression and " Peter Xu
2018-03-22 12:03     ` Xiao Guangrong
2018-03-27  7:22       ` Peter Xu
2018-03-26 19:42         ` Xiao Guangrong
2018-03-27 11:17           ` Peter Xu
2018-03-27  1:20             ` Xiao Guangrong
2018-03-28  0:43               ` [PATCH 3/8] migration: support to detect compression " jiang.biao2
2018-03-27 14:35                 ` Xiao Guangrong
2018-03-28  3:03                   ` Peter Xu
2018-03-28  4:08                     ` [PATCH 3/8] migration: support to detect compression " jiang.biao2
2018-03-28  4:20                       ` Peter Xu
2018-03-27 18:44                         ` Xiao Guangrong
2018-03-28  8:07                           ` [PATCH 3/8] migration: support to detect compression and " jiang.biao2
2018-03-13  7:57 ` [PATCH 4/8] migration: introduce control_save_page() guangrong.xiao [this message]
2018-03-15 11:37   ` Dr. David Alan Gilbert
2018-03-16  8:52     ` Xiao Guangrong
2018-03-27  7:47     ` Peter Xu
2018-03-13  7:57 ` [PATCH 5/8] migration: move calling control_save_page to the common place guangrong.xiao
2018-03-15 11:47   ` Dr. David Alan Gilbert
2018-03-16  8:59     ` Xiao Guangrong
2018-03-19 13:15       ` Dr. David Alan Gilbert
2018-03-27 12:35   ` Peter Xu
2018-03-13  7:57 ` [PATCH 6/8] migration: move calling save_zero_page " guangrong.xiao
2018-03-15 12:27   ` Dr. David Alan Gilbert
2018-03-27 12:49   ` Peter Xu
2018-03-13  7:57 ` [PATCH 7/8] migration: introduce save_normal_page() guangrong.xiao
2018-03-15 12:30   ` Dr. David Alan Gilbert
2018-03-27 12:54   ` Peter Xu
2018-03-13  7:57 ` [PATCH 8/8] migration: remove ram_save_compressed_page() guangrong.xiao
2018-03-15 12:32   ` Dr. David Alan Gilbert
2018-03-27 12:56   ` Peter Xu
