From: Leonardo Bras <leobras@redhat.com>
To: "Daniel P. Berrangé" <berrange@redhat.com>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
	"Eric Blake" <eblake@redhat.com>,
	"Markus Armbruster" <armbru@redhat.com>
Cc: Leonardo Bras <leobras@redhat.com>, qemu-devel@nongnu.org
Subject: [PATCH v5 6/6] multifd: Implement zerocopy write in multifd migration (multifd-zerocopy)
Date: Fri, 12 Nov 2021 02:10:41 -0300
Message-ID: <20211112051040.923746-7-leobras@redhat.com>
In-Reply-To: <20211112051040.923746-1-leobras@redhat.com>

Implement zerocopy in nocomp_send_write(), by making use of the QIOChannel
zerocopy interface.
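
In short, the send path change boils down to this (full diff below):

    /* channel setup, in multifd_new_send_channel_async(): */
    if (migrate_use_zerocopy()) {
        p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZEROCOPY;
    }

    /* per-packet write, in nocomp_send_write(): */
    return qio_channel_writev_all_flags(p->c, p->pages->iov, used,
                                        p->write_flags, errp);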

Change multifd_send_sync_main() so it can distinguish the per-iteration
syncs from the ones done at setup and completion time, so that
flush_zerocopy() can be called after each iteration to make sure all
dirty pages are sent before the next iteration starts.
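
The three callers in migration/ram.c then pass the new argument as
follows (see the ram.c hunks below):

    multifd_send_sync_main(f, false);      /* ram_save_setup()    */
    multifd_send_sync_main(rs->f, true);   /* ram_save_iterate()  */
    multifd_send_sync_main(rs->f, false);  /* ram_save_complete() */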

Also make it return -1 if flush_zerocopy() fails, in order to cancel
the migration process and avoid resuming the guest on the target host
before all of the current RAM has been received.
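
For example, ram_save_iterate() now propagates the failure instead of
ignoring it (see the ram.c hunk below):

    ret = multifd_send_sync_main(rs->f, true);
    if (ret < 0) {
        /* abort migration rather than resume an incomplete guest */
        return ret;
    }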

This works fine for RAM migration because RAM pages are not usually
freed, and there is no problem if a page's contents change between
async_send() and the actual sending of the buffer: the change dirties
the page, causing it to be re-sent in a later iteration anyway.

Given that a lot of locked memory may be needed in order to use multifd
migration with zerocopy enabled, make it optional by creating a new
migration parameter "zerocopy" in QAPI, so low-privileged users can
still perform multifd migrations.
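
For illustration only (the parameter itself is introduced by patch 4/6
of this series), enabling it over QMP would look something like:

    { "execute": "migrate-set-parameters",
      "arguments": { "zerocopy": true } }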

Signed-off-by: Leonardo Bras <leobras@redhat.com>
---
 migration/multifd.h |  4 +++-
 migration/multifd.c | 37 ++++++++++++++++++++++++++++++++-----
 migration/ram.c     | 29 ++++++++++++++++++++++-------
 migration/socket.c  |  9 +++++++--
 4 files changed, 64 insertions(+), 15 deletions(-)

diff --git a/migration/multifd.h b/migration/multifd.h
index 15c50ca0b2..37941c1872 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -22,7 +22,7 @@ int multifd_load_cleanup(Error **errp);
 bool multifd_recv_all_channels_created(void);
 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
 void multifd_recv_sync_main(void);
-void multifd_send_sync_main(QEMUFile *f);
+int multifd_send_sync_main(QEMUFile *f, bool sync);
 int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset);
 
 /* Multifd Compression flags */
@@ -97,6 +97,8 @@ typedef struct {
     uint32_t packet_len;
     /* pointer to the packet */
     MultiFDPacket_t *packet;
+    /* multifd flags for sending ram */
+    int write_flags;
     /* multifd flags for each packet */
     uint32_t flags;
     /* size of the next packet that contains pages */
diff --git a/migration/multifd.c b/migration/multifd.c
index 3d9dc8cb58..816078df60 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -105,7 +105,8 @@ static int nocomp_send_prepare(MultiFDSendParams *p, uint32_t used,
  */
 static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
 {
-    return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
+    return qio_channel_writev_all_flags(p->c, p->pages->iov, used,
+                                        p->write_flags, errp);
 }
 
 /**
@@ -578,19 +579,27 @@ void multifd_save_cleanup(void)
     multifd_send_state = NULL;
 }
 
-void multifd_send_sync_main(QEMUFile *f)
+int multifd_send_sync_main(QEMUFile *f, bool sync)
 {
     int i;
+    bool flush_zerocopy;
 
     if (!migrate_use_multifd()) {
-        return;
+        return 0;
     }
     if (multifd_send_state->pages->used) {
         if (multifd_send_pages(f) < 0) {
             error_report("%s: multifd_send_pages fail", __func__);
-            return;
+            return 0;
         }
     }
+
+    /*
+     * When using zerocopy, it's necessary to flush after each iteration to make
+     * sure pages from earlier iterations don't end up replacing newer pages.
+     */
+    flush_zerocopy = sync && migrate_use_zerocopy();
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -601,7 +610,7 @@ void multifd_send_sync_main(QEMUFile *f)
         if (p->quit) {
             error_report("%s: channel %d has already quit", __func__, i);
             qemu_mutex_unlock(&p->mutex);
-            return;
+            return 0;
         }
 
         p->packet_num = multifd_send_state->packet_num++;
@@ -612,6 +621,17 @@ void multifd_send_sync_main(QEMUFile *f)
         ram_counters.transferred += p->packet_len;
         qemu_mutex_unlock(&p->mutex);
         qemu_sem_post(&p->sem);
+
+        if (flush_zerocopy) {
+            int ret;
+            Error *err = NULL;
+
+            ret = qio_channel_flush_zerocopy(p->c, &err);
+            if (ret < 0) {
+                error_report_err(err);
+                return -1;
+            }
+        }
     }
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -620,6 +640,8 @@ void multifd_send_sync_main(QEMUFile *f)
         qemu_sem_wait(&p->sem_sync);
     }
     trace_multifd_send_sync_main(multifd_send_state->packet_num);
+
+    return 0;
 }
 
 static void *multifd_send_thread(void *opaque)
@@ -853,6 +875,10 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
         goto cleanup;
     }
 
+    if (migrate_use_zerocopy()) {
+        p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZEROCOPY;
+    }
+
     p->c = QIO_CHANNEL(sioc);
     qio_channel_set_delay(p->c, false);
     p->running = true;
@@ -918,6 +944,7 @@ int multifd_save_setup(Error **errp)
         p->packet->version = cpu_to_be32(MULTIFD_VERSION);
         p->name = g_strdup_printf("multifdsend_%d", i);
         p->tls_hostname = g_strdup(s->hostname);
+        p->write_flags = 0;
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }
 
diff --git a/migration/ram.c b/migration/ram.c
index 863035d235..0b3ddbffc1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2992,6 +2992,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 {
     RAMState **rsp = opaque;
     RAMBlock *block;
+    int ret;
 
     if (compress_threads_save_setup()) {
         return -1;
@@ -3026,7 +3027,11 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
 
-    multifd_send_sync_main(f);
+    ret = multifd_send_sync_main(f, false);
+    if (ret < 0) {
+        return ret;
+    }
+
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
 
@@ -3135,7 +3140,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 out:
     if (ret >= 0
         && migration_is_setup_or_active(migrate_get_current()->state)) {
-        multifd_send_sync_main(rs->f);
+        ret = multifd_send_sync_main(rs->f, true);
+        if (ret < 0) {
+            return ret;
+        }
+
         qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
         qemu_fflush(f);
         ram_counters.transferred += 8;
@@ -3193,13 +3202,19 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         ram_control_after_iterate(f, RAM_CONTROL_FINISH);
     }
 
-    if (ret >= 0) {
-        multifd_send_sync_main(rs->f);
-        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-        qemu_fflush(f);
+    if (ret < 0) {
+        return ret;
     }
 
-    return ret;
+    ret = multifd_send_sync_main(rs->f, false);
+    if (ret < 0) {
+        return ret;
+    }
+
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+    qemu_fflush(f);
+
+    return 0;
 }
 
 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
diff --git a/migration/socket.c b/migration/socket.c
index e26e94aa0c..8e40e0a3fd 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -78,8 +78,13 @@ static void socket_outgoing_migration(QIOTask *task,
         trace_migration_socket_outgoing_connected(data->hostname);
     }
 
-    if (migrate_use_zerocopy()) {
-        error_setg(&err, "Zerocopy not available in migration");
+    if (migrate_use_zerocopy() &&
+        (!migrate_use_multifd() ||
+         !qio_channel_has_feature(sioc, QIO_CHANNEL_FEATURE_WRITE_ZEROCOPY) ||
+          migrate_multifd_compression() != MULTIFD_COMPRESSION_NONE ||
+          migrate_use_tls())) {
+        error_setg(&err,
+                   "Zerocopy only available for non-compressed non-TLS multifd migration");
     }
 
     migration_channel_connect(data->s, sioc, data->hostname, err);
-- 
2.33.1


