From: Wei Yang <richardw.yang@linux.intel.com>
To: Ivan Ren <renyime@gmail.com>
Cc: qemu-devel@nongnu.org, richardw.yang@linux.intel.com,
	dgilbert@redhat.com, quintela@redhat.com
Subject: Re: [Qemu-devel] [PATCH v3 2/3] migration: add speed limit for multifd migration
Date: Wed, 31 Jul 2019 08:26:15 +0800
Message-ID: <20190731002615.GA26871@richard>
In-Reply-To: <1564464816-21804-3-git-send-email-ivanren@tencent.com>

On Tue, Jul 30, 2019 at 01:33:35PM +0800, Ivan Ren wrote:
>From: Ivan Ren <ivanren@tencent.com>
>
>Limit the speed of multifd migration through the common speed limitation
>in qemu file.
>
>Signed-off-by: Ivan Ren <ivanren@tencent.com>

Reviewed-by: Wei Yang <richardw.yang@linux.intel.com>
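
For readers following along: the point of routing the multifd byte counts
through qemu_file_update_transfer() is that the migration rate limiter only
sees what is accounted on the main QEMUFile, while multifd channels send
page data on their own sockets. Without these calls that traffic is
invisible to the limit. Below is a minimal stand-alone sketch of the
accounting model as I read the series; it is not QEMU's actual
implementation, and the type and field names are illustrative only.

  #include <stdint.h>
  #include <stdbool.h>

  /* Simplified model of QEMUFile rate limiting: the migration thread
   * periodically resets the counter, and senders pause once the
   * per-period budget is used up. */
  typedef struct {
      int64_t bytes_xfer;   /* bytes accounted in the current period */
      int64_t xfer_limit;   /* per-period budget, from max-bandwidth */
  } RateLimitedFile;

  /* Roughly what qemu_file_update_transfer() does in this model:
   * credit bytes sent out-of-band (e.g. on a multifd channel)
   * against the shared budget. */
  static void file_update_transfer(RateLimitedFile *f, int64_t len)
  {
      f->bytes_xfer += len;
  }

  /* Roughly what the rate-limit check amounts to: tell the caller
   * whether to stop sending for the rest of this period. */
  static bool file_rate_limited(const RateLimitedFile *f)
  {
      return f->bytes_xfer >= f->xfer_limit;
  }

  /* Invoked once per period by the migration thread. */
  static void file_reset_period(RateLimitedFile *f)
  {
      f->bytes_xfer = 0;
  }

With the two qemu_file_update_transfer() calls added below, both the page
payloads in multifd_send_pages() and the SYNC packets in
multifd_send_sync_main() count against that shared budget, so multifd
traffic should now honor the configured migration speed like the regular
precopy path does.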

>---
> migration/ram.c | 22 ++++++++++++----------
> 1 file changed, 12 insertions(+), 10 deletions(-)
>
>diff --git a/migration/ram.c b/migration/ram.c
>index 889148dd84..88ddd2bbe2 100644
>--- a/migration/ram.c
>+++ b/migration/ram.c
>@@ -922,7 +922,7 @@ struct {
>  * false.
>  */
> 
>-static int multifd_send_pages(void)
>+static int multifd_send_pages(RAMState *rs)
> {
>     int i;
>     static int next_channel;
>@@ -954,6 +954,7 @@ static int multifd_send_pages(void)
>     multifd_send_state->pages = p->pages;
>     p->pages = pages;
>     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
>+    qemu_file_update_transfer(rs->f, transferred);
>     ram_counters.multifd_bytes += transferred;
>     ram_counters.transferred += transferred;
>     qemu_mutex_unlock(&p->mutex);
>@@ -962,7 +963,7 @@ static int multifd_send_pages(void)
>     return 1;
> }
> 
>-static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
>+static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
> {
>     MultiFDPages_t *pages = multifd_send_state->pages;
> 
>@@ -981,12 +982,12 @@ static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
>         }
>     }
> 
>-    if (multifd_send_pages() < 0) {
>+    if (multifd_send_pages(rs) < 0) {
>         return -1;
>     }
> 
>     if (pages->block != block) {
>-        return  multifd_queue_page(block, offset);
>+        return  multifd_queue_page(rs, block, offset);
>     }
> 
>     return 1;
>@@ -1054,7 +1055,7 @@ void multifd_save_cleanup(void)
>     multifd_send_state = NULL;
> }
> 
>-static void multifd_send_sync_main(void)
>+static void multifd_send_sync_main(RAMState *rs)
> {
>     int i;
> 
>@@ -1062,7 +1063,7 @@ static void multifd_send_sync_main(void)
>         return;
>     }
>     if (multifd_send_state->pages->used) {
>-        if (multifd_send_pages() < 0) {
>+        if (multifd_send_pages(rs) < 0) {
>             error_report("%s: multifd_send_pages fail", __func__);
>             return;
>         }
>@@ -1083,6 +1084,7 @@ static void multifd_send_sync_main(void)
>         p->packet_num = multifd_send_state->packet_num++;
>         p->flags |= MULTIFD_FLAG_SYNC;
>         p->pending_job++;
>+        qemu_file_update_transfer(rs->f, p->packet_len);
>         qemu_mutex_unlock(&p->mutex);
>         qemu_sem_post(&p->sem);
>     }
>@@ -2079,7 +2081,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
> static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
>                                  ram_addr_t offset)
> {
>-    if (multifd_queue_page(block, offset) < 0) {
>+    if (multifd_queue_page(rs, block, offset) < 0) {
>         return -1;
>     }
>     ram_counters.normal++;
>@@ -3482,7 +3484,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
>     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
> 
>-    multifd_send_sync_main();
>+    multifd_send_sync_main(*rsp);
>     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>     qemu_fflush(f);
> 
>@@ -3570,7 +3572,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
> 
> out:
>-    multifd_send_sync_main();
>+    multifd_send_sync_main(rs);
>     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>     qemu_fflush(f);
>     ram_counters.transferred += 8;
>@@ -3629,7 +3631,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
> 
>     rcu_read_unlock();
> 
>-    multifd_send_sync_main();
>+    multifd_send_sync_main(rs);
>     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>     qemu_fflush(f);
> 
>-- 
>2.17.2 (Apple Git-113)
>

-- 
Wei Yang
Help you, Help me

