All of lore.kernel.org
 help / color / mirror / Atom feed
From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: dgilbert@redhat.com, lvivier@redhat.com, peterx@redhat.com,
	berrange@redhat.com
Subject: [Qemu-devel] [PATCH v5 17/17] migration: Flush receive queue
Date: Mon, 17 Jul 2017 15:42:38 +0200	[thread overview]
Message-ID: <20170717134238.1966-18-quintela@redhat.com> (raw)
In-Reply-To: <20170717134238.1966-1-quintela@redhat.com>

Each time that we sync the bitmap, there is a possibility that we
receive a page that is being processed by a different thread.  We fix
this problem by making sure that we wait for all receiving threads to
finish their work before we proceed with the next stage.

We are low on page flags, so we use a combination that is not valid to
emit that message:  MULTIFD_PAGE and COMPRESSED.

I tried to make a migration command for it, but it doesn't work
because we sync the bitmap sometimes when we have already sent the
beginning of the section, so I just added a new page flag.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 56 insertions(+), 1 deletion(-)

diff --git a/migration/ram.c b/migration/ram.c
index c78b286..bffe204 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -71,6 +71,12 @@
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 #define RAM_SAVE_FLAG_MULTIFD_PAGE     0x200
 
+/* We are getting low on page flags, so we start using combinations.
+   When we need to flush a page, we send it as
+   RAM_SAVE_FLAG_MULTIFD_PAGE | RAM_SAVE_FLAG_COMPRESS_PAGE.
+   We don't otherwise allow that combination.
+*/
+
 static inline bool is_zero_range(uint8_t *p, uint64_t size)
 {
     return buffer_is_zero(p, size);
@@ -193,6 +199,9 @@ struct RAMState {
     uint64_t iterations_prev;
     /* Iterations since start */
     uint64_t iterations;
+    /* Indicates if we have synced the bitmap and we need to ensure that
+       the target has processed all previous pages */
+    bool multifd_needs_flush;
     /* protects modification of the bitmap */
     uint64_t migration_dirty_pages;
     /* number of dirty bits in the bitmap */
@@ -363,7 +372,6 @@ static void compress_threads_save_setup(void)
 
 /* Multiple fd's */
 
-
 typedef struct {
     int num;
     int size;
@@ -595,9 +603,11 @@ struct MultiFDRecvParams {
     QIOChannel *c;
     QemuSemaphore ready;
     QemuSemaphore sem;
+    QemuCond cond_sync;
     QemuMutex mutex;
     /* proteced by param mutex */
     bool quit;
+    bool sync;
     multifd_pages_t pages;
     bool done;
 };
@@ -637,6 +647,7 @@ void multifd_load_cleanup(void)
         qemu_thread_join(&p->thread);
         qemu_mutex_destroy(&p->mutex);
         qemu_sem_destroy(&p->sem);
+        qemu_cond_destroy(&p->cond_sync);
         socket_recv_channel_destroy(p->c);
         g_free(p);
         multifd_recv_state->params[i] = NULL;
@@ -675,6 +686,10 @@ static void *multifd_recv_thread(void *opaque)
                 return NULL;
             }
             p->done = true;
+            if (p->sync) {
+                qemu_cond_signal(&p->cond_sync);
+                p->sync = false;
+            }
             qemu_mutex_unlock(&p->mutex);
             qemu_sem_post(&p->ready);
             continue;
@@ -724,9 +739,11 @@ gboolean multifd_new_channel(QIOChannel *ioc)
     qemu_mutex_init(&p->mutex);
     qemu_sem_init(&p->sem, 0);
     qemu_sem_init(&p->ready, 0);
+    qemu_cond_init(&p->cond_sync);
     p->quit = false;
     p->id = id;
     p->done = false;
+    p->sync = false;
     multifd_init_group(&p->pages);
     p->c = ioc;
     atomic_set(&multifd_recv_state->params[id], p);
@@ -792,6 +809,27 @@ static void multifd_recv_page(uint8_t *address, uint16_t fd_num)
     qemu_sem_post(&p->sem);
 }
 
+static int multifd_flush(void)
+{
+    int i, thread_count;
+
+    if (!migrate_use_multifd()) {
+        return 0;
+    }
+    thread_count = migrate_multifd_threads();
+    for (i = 0; i < thread_count; i++) {
+        MultiFDRecvParams *p = multifd_recv_state->params[i];
+
+        qemu_mutex_lock(&p->mutex);
+        while (!p->done) {
+            p->sync = true;
+            qemu_cond_wait(&p->cond_sync, &p->mutex);
+        }
+        qemu_mutex_unlock(&p->mutex);
+    }
+    return 0;
+}
+
 /**
  * save_page_header: write page header to wire
  *
@@ -809,6 +847,12 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
 {
     size_t size, len;
 
+    if (rs->multifd_needs_flush &&
+        (offset & RAM_SAVE_FLAG_MULTIFD_PAGE)) {
+        offset |= RAM_SAVE_FLAG_ZERO;
+        rs->multifd_needs_flush = false;
+    }
+
     if (block == rs->last_sent_block) {
         offset |= RAM_SAVE_FLAG_CONTINUE;
     }
@@ -2496,6 +2540,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 
     if (!migration_in_postcopy()) {
         migration_bitmap_sync(rs);
+        if (migrate_use_multifd()) {
+            rs->multifd_needs_flush = true;
+        }
     }
 
     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -2538,6 +2585,9 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
         qemu_mutex_lock_iothread();
         rcu_read_lock();
         migration_bitmap_sync(rs);
+        if (migrate_use_multifd()) {
+            rs->multifd_needs_flush = true;
+        }
         rcu_read_unlock();
         qemu_mutex_unlock_iothread();
         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@@ -3012,6 +3062,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             break;
         }
 
+        if ((flags & (RAM_SAVE_FLAG_MULTIFD_PAGE | RAM_SAVE_FLAG_ZERO))
+                  == (RAM_SAVE_FLAG_MULTIFD_PAGE | RAM_SAVE_FLAG_ZERO)) {
+            multifd_flush();
+            flags = flags & ~RAM_SAVE_FLAG_ZERO;
+        }
         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE |
                      RAM_SAVE_FLAG_MULTIFD_PAGE)) {
-- 
2.9.4

  parent reply	other threads:[~2017-07-17 13:43 UTC|newest]

Thread overview: 93+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-07-17 13:42 [Qemu-devel] [PATCH v5 00/17] Multifd Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 01/17] migrate: Add gboolean return type to migrate_channel_process_incoming Juan Quintela
2017-07-19 15:01   ` Dr. David Alan Gilbert
2017-07-20  7:00     ` Peter Xu
2017-07-20  8:47       ` Daniel P. Berrange
2017-07-24 10:18         ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 02/17] migration: Create migration_ioc_process_incoming() Juan Quintela
2017-07-19 13:38   ` Daniel P. Berrange
2017-07-24 11:09     ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 03/17] qio: Create new qio_channel_{readv, writev}_all Juan Quintela
2017-07-19 13:44   ` Daniel P. Berrange
2017-08-08  8:40     ` Juan Quintela
2017-08-08  9:25       ` Daniel P. Berrange
2017-07-19 15:42   ` Dr. David Alan Gilbert
2017-07-19 15:43     ` Daniel P. Berrange
2017-07-19 16:04       ` Dr. David Alan Gilbert
2017-07-19 16:08         ` Daniel P. Berrange
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 04/17] migration: Add multifd capability Juan Quintela
2017-07-19 15:44   ` Dr. David Alan Gilbert
2017-08-08  8:42     ` Juan Quintela
2017-07-19 17:14   ` Eric Blake
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 05/17] migration: Create x-multifd-threads parameter Juan Quintela
2017-07-19 16:00   ` Dr. David Alan Gilbert
2017-08-08  8:46     ` Juan Quintela
2017-08-08  9:44       ` Dr. David Alan Gilbert
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 06/17] migration: Create x-multifd-group parameter Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 07/17] migration: Create multifd migration threads Juan Quintela
2017-07-19 16:49   ` Dr. David Alan Gilbert
2017-08-08  8:58     ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 08/17] migration: Split migration_fd_process_incomming Juan Quintela
2017-07-19 17:08   ` Dr. David Alan Gilbert
2017-07-21 12:39     ` Eric Blake
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 09/17] migration: Start of multiple fd work Juan Quintela
2017-07-19 13:56   ` Daniel P. Berrange
2017-07-19 17:35   ` Dr. David Alan Gilbert
2017-08-08  9:35     ` Juan Quintela
2017-08-08  9:54       ` Dr. David Alan Gilbert
2017-07-20  9:34   ` Peter Xu
2017-08-08  9:19     ` Juan Quintela
2017-08-09  8:08       ` Peter Xu
2017-08-09 11:12         ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 10/17] migration: Create ram_multifd_page Juan Quintela
2017-07-19 19:02   ` Dr. David Alan Gilbert
2017-07-20  8:10     ` Peter Xu
2017-07-20 11:48       ` Dr. David Alan Gilbert
2017-08-08 15:58         ` Juan Quintela
2017-08-08 16:04       ` Juan Quintela
2017-08-09  7:42         ` Peter Xu
2017-08-08 15:56     ` Juan Quintela
2017-08-08 16:30       ` Dr. David Alan Gilbert
2017-08-08 18:02         ` Juan Quintela
2017-08-08 19:14           ` Dr. David Alan Gilbert
2017-08-09 16:48             ` Paolo Bonzini
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 11/17] migration: Really use multiple pages at a time Juan Quintela
2017-07-19 13:58   ` Daniel P. Berrange
2017-08-08 11:55     ` Juan Quintela
2017-07-20  9:44   ` Dr. David Alan Gilbert
2017-08-08 12:11     ` Juan Quintela
2017-07-20  9:49   ` Peter Xu
2017-07-20 10:09     ` Peter Xu
2017-08-08 16:06     ` Juan Quintela
2017-08-09  7:48       ` Peter Xu
2017-08-09  8:05         ` Juan Quintela
2017-08-09  8:12           ` Peter Xu
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 12/17] migration: Send the fd number which we are going to use for this page Juan Quintela
2017-07-20  9:58   ` Dr. David Alan Gilbert
2017-08-09 16:48   ` Paolo Bonzini
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 13/17] migration: Create thread infrastructure for multifd recv side Juan Quintela
2017-07-20 10:22   ` Peter Xu
2017-08-08 11:41     ` Juan Quintela
2017-08-09  5:53       ` Peter Xu
2017-07-20 10:29   ` Dr. David Alan Gilbert
2017-08-08 11:51     ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 14/17] migration: Delay the start of reception on main channel Juan Quintela
2017-07-20 10:56   ` Dr. David Alan Gilbert
2017-08-08 11:29     ` Juan Quintela
2017-07-20 11:10   ` Peter Xu
2017-08-08 11:30     ` Juan Quintela
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 15/17] migration: Test new fd infrastructure Juan Quintela
2017-07-20 11:20   ` Dr. David Alan Gilbert
2017-07-17 13:42 ` [Qemu-devel] [PATCH v5 16/17] migration: Transfer pages over new channels Juan Quintela
2017-07-20 11:31   ` Dr. David Alan Gilbert
2017-08-08 11:13     ` Juan Quintela
2017-08-08 11:32       ` Dr. David Alan Gilbert
2017-07-17 13:42 ` Juan Quintela [this message]
2017-07-20 11:45   ` [Qemu-devel] [PATCH v5 17/17] migration: Flush receive queue Dr. David Alan Gilbert
2017-08-08 10:43     ` Juan Quintela
2017-08-08 11:25       ` Dr. David Alan Gilbert
2017-07-21  2:40   ` Peter Xu
2017-08-08 11:40     ` Juan Quintela
2017-08-10  6:49       ` Peter Xu
2017-07-21  6:03   ` Peter Xu
2017-07-21 10:53     ` Juan Quintela

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170717134238.1966-18-quintela@redhat.com \
    --to=quintela@redhat.com \
    --cc=berrange@redhat.com \
    --cc=dgilbert@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=peterx@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.