All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration
@ 2018-04-07  8:26 Lidong Chen
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path Lidong Chen
                   ` (6 more replies)
  0 siblings, 7 replies; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

Current Qemu RDMA communication does not support sending and receiving
data at the same time, so when an RDMA live migration is run with postcopy
enabled, the source qemu return path thread gets a qemu file error.

These patches add postcopy support for RDMA live migration.

Lidong Chen (5):
  migration: create a dedicated connection for rdma return path
  migration: add the interface to set get_return_path
  migration: implement the get_return_path for RDMA iochannel
  migration: fix qemu carsh when RDMA live migration
  migration: disable RDMA WRITR after postcopy started.

 migration/qemu-file-channel.c |  12 ++--
 migration/qemu-file.c         |  13 +++-
 migration/qemu-file.h         |   2 +-
 migration/rdma.c              | 148 ++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 163 insertions(+), 12 deletions(-)

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
@ 2018-04-07  8:26 ` Lidong Chen
  2018-04-11 16:57   ` Dr. David Alan Gilbert
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path Lidong Chen
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

If an RDMA migration is started with postcopy enabled, the source qemu
establishes a dedicated connection for the return path.

Signed-off-by: Lidong Chen <lidongchen@tencent.com>
---
 migration/rdma.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 91 insertions(+), 3 deletions(-)

diff --git a/migration/rdma.c b/migration/rdma.c
index da474fc..230bd97 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -387,6 +387,10 @@ typedef struct RDMAContext {
     uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];
 
     GHashTable *blockmap;
+
+    /* the RDMAContext for return path */
+    struct RDMAContext *return_path;
+    bool is_return_path;
 } RDMAContext;
 
 #define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
@@ -2329,10 +2333,22 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
         rdma_destroy_id(rdma->cm_id);
         rdma->cm_id = NULL;
     }
+
+    /* the destination side, listen_id and channel is shared */
     if (rdma->listen_id) {
-        rdma_destroy_id(rdma->listen_id);
+        if (!rdma->is_return_path) {
+            rdma_destroy_id(rdma->listen_id);
+        }
         rdma->listen_id = NULL;
+
+        if (rdma->channel) {
+            if (!rdma->is_return_path) {
+                rdma_destroy_event_channel(rdma->channel);
+            }
+            rdma->channel = NULL;
+        }
     }
+
     if (rdma->channel) {
         rdma_destroy_event_channel(rdma->channel);
         rdma->channel = NULL;
@@ -2561,6 +2577,25 @@ err_dest_init_create_listen_id:
 
 }
 
+static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
+                                            RDMAContext *rdma)
+{
+    int idx;
+
+    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
+        rdma_return_path->wr_data[idx].control_len = 0;
+        rdma_return_path->wr_data[idx].control_curr = NULL;
+    }
+
+    /*the CM channel and CM id is shared*/
+    rdma_return_path->channel = rdma->channel;
+    rdma_return_path->listen_id = rdma->listen_id;
+
+    rdma->return_path = rdma_return_path;
+    rdma_return_path->return_path = rdma;
+    rdma_return_path->is_return_path = true;
+}
+
 static void *qemu_rdma_data_init(const char *host_port, Error **errp)
 {
     RDMAContext *rdma = NULL;
@@ -3014,6 +3049,8 @@ err:
     return ret;
 }
 
+static void rdma_accept_incoming_migration(void *opaque);
+
 static int qemu_rdma_accept(RDMAContext *rdma)
 {
     RDMACapabilities cap;
@@ -3108,7 +3145,14 @@ static int qemu_rdma_accept(RDMAContext *rdma)
         }
     }
 
-    qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
+    /* Accept the second connection request for return path */
+    if (migrate_postcopy() && !rdma->is_return_path) {
+        qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
+                            NULL,
+                            (void *)(intptr_t)rdma->return_path);
+    } else {
+        qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
+    }
 
     ret = rdma_accept(rdma->cm_id, &conn_param);
     if (ret) {
@@ -3681,6 +3725,10 @@ static void rdma_accept_incoming_migration(void *opaque)
 
     trace_qemu_rdma_accept_incoming_migration_accepted();
 
+    if (rdma->is_return_path) {
+        return;
+    }
+
     f = qemu_fopen_rdma(rdma, "rb");
     if (f == NULL) {
         ERROR(errp, "could not qemu_fopen_rdma!");
@@ -3695,7 +3743,7 @@ static void rdma_accept_incoming_migration(void *opaque)
 void rdma_start_incoming_migration(const char *host_port, Error **errp)
 {
     int ret;
-    RDMAContext *rdma;
+    RDMAContext *rdma, *rdma_return_path;
     Error *local_err = NULL;
 
     trace_rdma_start_incoming_migration();
@@ -3722,12 +3770,24 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
 
     trace_rdma_start_incoming_migration_after_rdma_listen();
 
+    /* initialize the RDMAContext for return path */
+    if (migrate_postcopy()) {
+        rdma_return_path = qemu_rdma_data_init(host_port, &local_err);
+
+        if (rdma_return_path == NULL) {
+            goto err;
+        }
+
+        qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
+    }
+
     qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                         NULL, (void *)(intptr_t)rdma);
     return;
 err:
     error_propagate(errp, local_err);
     g_free(rdma);
+    g_free(rdma_return_path);
 }
 
 void rdma_start_outgoing_migration(void *opaque,
@@ -3735,6 +3795,7 @@ void rdma_start_outgoing_migration(void *opaque,
 {
     MigrationState *s = opaque;
     RDMAContext *rdma = qemu_rdma_data_init(host_port, errp);
+    RDMAContext *rdma_return_path = NULL;
     int ret = 0;
 
     if (rdma == NULL) {
@@ -3755,6 +3816,32 @@ void rdma_start_outgoing_migration(void *opaque,
         goto err;
     }
 
+    /* RDMA postcopy need a seprate queue pair for return path */
+    if (migrate_postcopy()) {
+        rdma_return_path = qemu_rdma_data_init(host_port, errp);
+
+        if (rdma_return_path == NULL) {
+            goto err;
+        }
+
+        ret = qemu_rdma_source_init(rdma_return_path,
+            s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);
+
+        if (ret) {
+            goto err;
+        }
+
+        ret = qemu_rdma_connect(rdma_return_path, errp);
+
+        if (ret) {
+            goto err;
+        }
+
+        rdma->return_path = rdma_return_path;
+        rdma_return_path->return_path = rdma;
+        rdma_return_path->is_return_path = true;
+    }
+
     trace_rdma_start_outgoing_migration_after_rdma_connect();
 
     s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
@@ -3762,4 +3849,5 @@ void rdma_start_outgoing_migration(void *opaque,
     return;
 err:
     g_free(rdma);
+    g_free(rdma_return_path);
 }
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path Lidong Chen
@ 2018-04-07  8:26 ` Lidong Chen
  2018-04-11 17:18   ` Dr. David Alan Gilbert
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 3/5] migration: implement the get_return_path for RDMA iochannel Lidong Chen
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

The default get_return_path function of the iochannel does not work for
RDMA live migration, so add an interface to set get_return_path.

Signed-off-by: Lidong Chen <lidongchen@tencent.com>
---
 migration/qemu-file-channel.c | 12 ++++++++----
 migration/qemu-file.c         | 10 ++++++++--
 migration/qemu-file.h         |  2 +-
 3 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/migration/qemu-file-channel.c b/migration/qemu-file-channel.c
index e202d73..d4dd8c4 100644
--- a/migration/qemu-file-channel.c
+++ b/migration/qemu-file-channel.c
@@ -156,7 +156,6 @@ static const QEMUFileOps channel_input_ops = {
     .close = channel_close,
     .shut_down = channel_shutdown,
     .set_blocking = channel_set_blocking,
-    .get_return_path = channel_get_input_return_path,
 };
 
 
@@ -165,18 +164,23 @@ static const QEMUFileOps channel_output_ops = {
     .close = channel_close,
     .shut_down = channel_shutdown,
     .set_blocking = channel_set_blocking,
-    .get_return_path = channel_get_output_return_path,
 };
 
 
 QEMUFile *qemu_fopen_channel_input(QIOChannel *ioc)
 {
+    QEMUFile *f;
     object_ref(OBJECT(ioc));
-    return qemu_fopen_ops(ioc, &channel_input_ops);
+    f = qemu_fopen_ops(ioc, &channel_input_ops);
+    qemu_file_set_return_path(f, channel_get_input_return_path);
+    return f;
 }
 
 QEMUFile *qemu_fopen_channel_output(QIOChannel *ioc)
 {
+    QEMUFile *f;
     object_ref(OBJECT(ioc));
-    return qemu_fopen_ops(ioc, &channel_output_ops);
+    f = qemu_fopen_ops(ioc, &channel_output_ops);
+    qemu_file_set_return_path(f, channel_get_output_return_path);
+    return f;
 }
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index bb63c77..8acb574 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -36,6 +36,7 @@
 struct QEMUFile {
     const QEMUFileOps *ops;
     const QEMUFileHooks *hooks;
+    QEMURetPathFunc *get_return_path;
     void *opaque;
 
     int64_t bytes_xfer;
@@ -72,10 +73,15 @@ int qemu_file_shutdown(QEMUFile *f)
  */
 QEMUFile *qemu_file_get_return_path(QEMUFile *f)
 {
-    if (!f->ops->get_return_path) {
+    if (!f->get_return_path) {
         return NULL;
     }
-    return f->ops->get_return_path(f->opaque);
+    return f->get_return_path(f->opaque);
+}
+
+void qemu_file_set_return_path(QEMUFile *f, QEMURetPathFunc *get_return_path)
+{
+    f->get_return_path = get_return_path;
 }
 
 bool qemu_file_mode_is_not_valid(const char *mode)
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index f4f356a..74210b7 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -102,7 +102,6 @@ typedef struct QEMUFileOps {
     QEMUFileCloseFunc *close;
     QEMUFileSetBlocking *set_blocking;
     QEMUFileWritevBufferFunc *writev_buffer;
-    QEMURetPathFunc *get_return_path;
     QEMUFileShutdownFunc *shut_down;
 } QEMUFileOps;
 
@@ -114,6 +113,7 @@ typedef struct QEMUFileHooks {
 } QEMUFileHooks;
 
 QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops);
+void qemu_file_set_return_path(QEMUFile *f, QEMURetPathFunc *get_return_path);
 void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks);
 int qemu_get_fd(QEMUFile *f);
 int qemu_fclose(QEMUFile *f);
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Qemu-devel] [PATCH 3/5] migration: implement the get_return_path for RDMA iochannel
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path Lidong Chen
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path Lidong Chen
@ 2018-04-07  8:26 ` Lidong Chen
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration Lidong Chen
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

The default get_return_path function does not work for RDMA live
migration; this patch implements get_return_path for the RDMA iochannel.

Signed-off-by: Lidong Chen <lidongchen@tencent.com>
---
 migration/rdma.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/migration/rdma.c b/migration/rdma.c
index 230bd97..53773c7 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3638,6 +3638,40 @@ err:
     return ret;
 }
 
+static QEMUFile *qio_channel_rdma_get_return_path(void *opaque, int input)
+{
+    QIOChannel *ioc = opaque;
+    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
+    RDMAContext *rdma = rioc->rdma;
+
+    QIOChannelRDMA *rioc_return_path;
+    RDMAContext *rdma_return_path = rdma->return_path;
+
+    if (!rdma_return_path) {
+        return NULL;
+    }
+
+    rioc_return_path = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA));
+    rioc_return_path->rdma = rdma_return_path;
+    if (input) {
+        rioc_return_path->file = qemu_fopen_channel_output(
+                                 QIO_CHANNEL(rioc_return_path));
+    } else {
+        rioc_return_path->file = qemu_fopen_channel_input(
+                                 QIO_CHANNEL(rioc_return_path));
+    }
+    return rioc_return_path->file;
+}
+
+static QEMUFile *qio_channel_rdma_get_output_return_path(void *opaque)
+{
+   return qio_channel_rdma_get_return_path(opaque, 0);
+}
+
+static QEMUFile *qio_channel_rdma_get_input_return_path(void *opaque)
+{
+   return qio_channel_rdma_get_return_path(opaque, 1);
+}
 static const QEMUFileHooks rdma_read_hooks = {
     .hook_ram_load = rdma_load_hook,
 };
@@ -3700,9 +3734,13 @@ static QEMUFile *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
     if (mode[0] == 'w') {
         rioc->file = qemu_fopen_channel_output(QIO_CHANNEL(rioc));
         qemu_file_set_hooks(rioc->file, &rdma_write_hooks);
+        qemu_file_set_return_path(rioc->file,
+                                  qio_channel_rdma_get_output_return_path);
     } else {
         rioc->file = qemu_fopen_channel_input(QIO_CHANNEL(rioc));
         qemu_file_set_hooks(rioc->file, &rdma_read_hooks);
+        qemu_file_set_return_path(rioc->file,
+                                  qio_channel_rdma_get_input_return_path);
     }
 
     return rioc->file;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
                   ` (2 preceding siblings ...)
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 3/5] migration: implement the get_return_path for RDMA iochannel Lidong Chen
@ 2018-04-07  8:26 ` Lidong Chen
  2018-04-11 16:43   ` Dr. David Alan Gilbert
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started Lidong Chen
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

After postcopy starts, the destination qemu works in a dedicated
thread, so only invoke yield_until_fd_readable before postcopy
migration.

Signed-off-by: Lidong Chen <lidongchen@tencent.com>
---
 migration/rdma.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/migration/rdma.c b/migration/rdma.c
index 53773c7..81be482 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1489,11 +1489,13 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
      * Coroutine doesn't start until migration_fd_process_incoming()
      * so don't yield unless we know we're running inside of a coroutine.
      */
-    if (rdma->migration_started_on_destination) {
+    if (rdma->migration_started_on_destination &&
+        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
         yield_until_fd_readable(rdma->comp_channel->fd);
     } else {
         /* This is the source side, we're in a separate thread
          * or destination prior to migration_fd_process_incoming()
+         * after postcopy, the destination also in a seprate thread.
          * we can't yield; so we have to poll the fd.
          * But we need to be able to handle 'cancel' or an error
          * without hanging forever.
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started.
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
                   ` (3 preceding siblings ...)
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration Lidong Chen
@ 2018-04-07  8:26 ` Lidong Chen
  2018-04-11 15:56   ` Dr. David Alan Gilbert
  2018-04-09  1:05 ` [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration 858585 jemmy
  2018-04-11 12:29 ` Dr. David Alan Gilbert
  6 siblings, 1 reply; 20+ messages in thread
From: Lidong Chen @ 2018-04-07  8:26 UTC (permalink / raw)
  To: quintela, dgilbert; +Cc: qemu-devel, adido, licq, Lidong Chen

RDMA write operations are performed with no notification to the destination
qemu, so the destination qemu cannot wake up. Therefore disable RDMA WRITE
after postcopy has started.

Signed-off-by: Lidong Chen <lidongchen@tencent.com>
---
 migration/qemu-file.c |  3 ++-
 migration/rdma.c      | 12 ++++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index 8acb574..a64ac3a 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -260,7 +260,8 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
         int ret = f->hooks->save_page(f, f->opaque, block_offset,
                                       offset, size, bytes_sent);
         f->bytes_xfer += size;
-        if (ret != RAM_SAVE_CONTROL_DELAYED) {
+        if (ret != RAM_SAVE_CONTROL_DELAYED &&
+            ret != RAM_SAVE_CONTROL_NOT_SUPP) {
             if (bytes_sent && *bytes_sent > 0) {
                 qemu_update_position(f, *bytes_sent);
             } else if (ret < 0) {
diff --git a/migration/rdma.c b/migration/rdma.c
index 81be482..8529ddd 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2964,6 +2964,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
 
     CHECK_ERROR_STATE();
 
+    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
+        return RAM_SAVE_CONTROL_NOT_SUPP;
+    }
+
     qemu_fflush(f);
 
     if (size > 0) {
@@ -3528,6 +3532,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
 
     CHECK_ERROR_STATE();
 
+    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
+        return 0;
+    }
+
     trace_qemu_rdma_registration_start(flags);
     qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
     qemu_fflush(f);
@@ -3550,6 +3558,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
 
     CHECK_ERROR_STATE();
 
+    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
+        return 0;
+    }
+
     qemu_fflush(f);
     ret = qemu_rdma_drain_cq(f, rdma);
 
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
                   ` (4 preceding siblings ...)
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started Lidong Chen
@ 2018-04-09  1:05 ` 858585 jemmy
  2018-04-11 12:29 ` Dr. David Alan Gilbert
  6 siblings, 0 replies; 20+ messages in thread
From: 858585 jemmy @ 2018-04-09  1:05 UTC (permalink / raw)
  To: Juan Quintela, Dave Gilbert, licq, Aviad Yehezkel
  Cc: qemu-devel, Lidong Chen, adido

ping.

On Sat, Apr 7, 2018 at 4:26 PM, Lidong Chen <jemmy858585@gmail.com> wrote:
> Current Qemu RDMA communication does not support send and receive
> data at the same time, so when RDMA live migration with postcopy
> enabled, the source qemu return path thread get qemu file error.
>
> Those patch add the postcopy support for RDMA live migration.
>
> Lidong Chen (5):
>   migration: create a dedicated connection for rdma return path
>   migration: add the interface to set get_return_path
>   migration: implement the get_return_path for RDMA iochannel
>   migration: fix qemu carsh when RDMA live migration
>   migration: disable RDMA WRITR after postcopy started.
>
>  migration/qemu-file-channel.c |  12 ++--
>  migration/qemu-file.c         |  13 +++-
>  migration/qemu-file.h         |   2 +-
>  migration/rdma.c              | 148 ++++++++++++++++++++++++++++++++++++++++--
>  4 files changed, 163 insertions(+), 12 deletions(-)
>
> --
> 1.8.3.1
>

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration
  2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
                   ` (5 preceding siblings ...)
  2018-04-09  1:05 ` [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration 858585 jemmy
@ 2018-04-11 12:29 ` Dr. David Alan Gilbert
  2018-04-12  3:57   ` 858585 jemmy
  6 siblings, 1 reply; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-11 12:29 UTC (permalink / raw)
  To: Lidong Chen; +Cc: quintela, qemu-devel, adido, licq, Lidong Chen

* Lidong Chen (jemmy858585@gmail.com) wrote:
> Current Qemu RDMA communication does not support send and receive
> data at the same time, so when RDMA live migration with postcopy
> enabled, the source qemu return path thread get qemu file error.
> 
> Those patch add the postcopy support for RDMA live migration.

This description is a little misleading; it doesn't really
do RDMA during the postcopy phase - what it really does is disable
the RDMA page sending during the postcopy phase, relying on the
RDMA code's stream emulation to send the page.

That's not necessarily a bad fix; you get the nice performance of RDMA
during the precopy phase, but how bad are you finding the performance
during the postcopy phase - the RDMA code we have was only really
designed for sending small commands over the stream?

Dave

> Lidong Chen (5):
>   migration: create a dedicated connection for rdma return path
>   migration: add the interface to set get_return_path
>   migration: implement the get_return_path for RDMA iochannel
>   migration: fix qemu carsh when RDMA live migration
>   migration: disable RDMA WRITR after postcopy started.
> 
>  migration/qemu-file-channel.c |  12 ++--
>  migration/qemu-file.c         |  13 +++-
>  migration/qemu-file.h         |   2 +-
>  migration/rdma.c              | 148 ++++++++++++++++++++++++++++++++++++++++--
>  4 files changed, 163 insertions(+), 12 deletions(-)
> 
> -- 
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started.
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started Lidong Chen
@ 2018-04-11 15:56   ` Dr. David Alan Gilbert
  2018-04-12  6:50     ` 858585 jemmy
  0 siblings, 1 reply; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-11 15:56 UTC (permalink / raw)
  To: Lidong Chen; +Cc: quintela, qemu-devel, adido, licq, Lidong Chen

* Lidong Chen (jemmy858585@gmail.com) wrote:
> RDMA write operations are performed with no notification to the destination
> qemu, then the destination qemu can not wakeup. So disable RDMA WRITE after
> postcopy started.
> 
> Signed-off-by: Lidong Chen <lidongchen@tencent.com>

This patch needs to be near the beginning of the series; at the moment a
bisect would lead you to the middle of the series which had return
paths, but then would fail to work properly because it would try and use
the RDMA code.

> ---
>  migration/qemu-file.c |  3 ++-
>  migration/rdma.c      | 12 ++++++++++++
>  2 files changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/migration/qemu-file.c b/migration/qemu-file.c
> index 8acb574..a64ac3a 100644
> --- a/migration/qemu-file.c
> +++ b/migration/qemu-file.c
> @@ -260,7 +260,8 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
>          int ret = f->hooks->save_page(f, f->opaque, block_offset,
>                                        offset, size, bytes_sent);
>          f->bytes_xfer += size;
> -        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> +        if (ret != RAM_SAVE_CONTROL_DELAYED &&
> +            ret != RAM_SAVE_CONTROL_NOT_SUPP) {

What about f->bytes_xfer in this case?

Is there anything we have to do at the switchover into postcopy to make
sure that all pages have been received?

Dave

>              if (bytes_sent && *bytes_sent > 0) {
>                  qemu_update_position(f, *bytes_sent);
>              } else if (ret < 0) {
> diff --git a/migration/rdma.c b/migration/rdma.c
> index 81be482..8529ddd 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -2964,6 +2964,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
>  
>      CHECK_ERROR_STATE();
>  
> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> +        return RAM_SAVE_CONTROL_NOT_SUPP;
> +    }
> +
>      qemu_fflush(f);
>  
>      if (size > 0) {
> @@ -3528,6 +3532,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
>  
>      CHECK_ERROR_STATE();
>  
> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> +        return 0;
> +    }
> +
>      trace_qemu_rdma_registration_start(flags);
>      qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
>      qemu_fflush(f);
> @@ -3550,6 +3558,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
>  
>      CHECK_ERROR_STATE();
>  
> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> +        return 0;
> +    }
> +
>      qemu_fflush(f);
>      ret = qemu_rdma_drain_cq(f, rdma);
>  
> -- 
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration Lidong Chen
@ 2018-04-11 16:43   ` Dr. David Alan Gilbert
  2018-04-12  9:40     ` 858585 jemmy
  0 siblings, 1 reply; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-11 16:43 UTC (permalink / raw)
  To: Lidong Chen; +Cc: quintela, qemu-devel, adido, licq, Lidong Chen

* Lidong Chen (jemmy858585@gmail.com) wrote:
> After postcopy, the destination qemu work in the dedicated
> thread, so only invoke yield_until_fd_readable before postcopy
> migration.

The subject line needs to be more descriptive:
   migration: Stop rdma yielding during incoming postcopy

I think.
(Also please check the subject spellings)

> Signed-off-by: Lidong Chen <lidongchen@tencent.com>
> ---
>  migration/rdma.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/migration/rdma.c b/migration/rdma.c
> index 53773c7..81be482 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -1489,11 +1489,13 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
>       * Coroutine doesn't start until migration_fd_process_incoming()
>       * so don't yield unless we know we're running inside of a coroutine.
>       */
> -    if (rdma->migration_started_on_destination) {
> +    if (rdma->migration_started_on_destination &&
> +        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {

OK, that's a bit delicate; watch if it ever gets called in a failure
case or similar - and also wathc out if we make more use of the status
on the destination, but otherwise, and with a fix for the subject;


Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

>          yield_until_fd_readable(rdma->comp_channel->fd);
>      } else {
>          /* This is the source side, we're in a separate thread
>           * or destination prior to migration_fd_process_incoming()
> +         * after postcopy, the destination also in a seprate thread.
>           * we can't yield; so we have to poll the fd.
>           * But we need to be able to handle 'cancel' or an error
>           * without hanging forever.
> -- 
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path Lidong Chen
@ 2018-04-11 16:57   ` Dr. David Alan Gilbert
  0 siblings, 0 replies; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-11 16:57 UTC (permalink / raw)
  To: Lidong Chen; +Cc: quintela, qemu-devel, adido, licq, Lidong Chen

* Lidong Chen (jemmy858585@gmail.com) wrote:
> If start a RDMA migration with postcopy enabled, the source qemu
> establish a dedicated connection for return path.
> 
> Signed-off-by: Lidong Chen <lidongchen@tencent.com>


OK, I think that'll work.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
>  migration/rdma.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 91 insertions(+), 3 deletions(-)
> 
> diff --git a/migration/rdma.c b/migration/rdma.c
> index da474fc..230bd97 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -387,6 +387,10 @@ typedef struct RDMAContext {
>      uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];
>  
>      GHashTable *blockmap;
> +
> +    /* the RDMAContext for return path */
> +    struct RDMAContext *return_path;
> +    bool is_return_path;
>  } RDMAContext;
>  
>  #define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
> @@ -2329,10 +2333,22 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
>          rdma_destroy_id(rdma->cm_id);
>          rdma->cm_id = NULL;
>      }
> +
> +    /* the destination side, listen_id and channel is shared */
>      if (rdma->listen_id) {
> -        rdma_destroy_id(rdma->listen_id);
> +        if (!rdma->is_return_path) {
> +            rdma_destroy_id(rdma->listen_id);
> +        }
>          rdma->listen_id = NULL;
> +
> +        if (rdma->channel) {
> +            if (!rdma->is_return_path) {
> +                rdma_destroy_event_channel(rdma->channel);
> +            }
> +            rdma->channel = NULL;
> +        }
>      }
> +
>      if (rdma->channel) {
>          rdma_destroy_event_channel(rdma->channel);
>          rdma->channel = NULL;
> @@ -2561,6 +2577,25 @@ err_dest_init_create_listen_id:
>  
>  }
>  
> +static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
> +                                            RDMAContext *rdma)
> +{
> +    int idx;
> +
> +    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
> +        rdma_return_path->wr_data[idx].control_len = 0;
> +        rdma_return_path->wr_data[idx].control_curr = NULL;
> +    }
> +
> +    /*the CM channel and CM id is shared*/
> +    rdma_return_path->channel = rdma->channel;
> +    rdma_return_path->listen_id = rdma->listen_id;
> +
> +    rdma->return_path = rdma_return_path;
> +    rdma_return_path->return_path = rdma;
> +    rdma_return_path->is_return_path = true;
> +}
> +
>  static void *qemu_rdma_data_init(const char *host_port, Error **errp)
>  {
>      RDMAContext *rdma = NULL;
> @@ -3014,6 +3049,8 @@ err:
>      return ret;
>  }
>  
> +static void rdma_accept_incoming_migration(void *opaque);
> +
>  static int qemu_rdma_accept(RDMAContext *rdma)
>  {
>      RDMACapabilities cap;
> @@ -3108,7 +3145,14 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>          }
>      }
>  
> -    qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
> +    /* Accept the second connection request for return path */
> +    if (migrate_postcopy() && !rdma->is_return_path) {
> +        qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
> +                            NULL,
> +                            (void *)(intptr_t)rdma->return_path);
> +    } else {
> +        qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
> +    }
>  
>      ret = rdma_accept(rdma->cm_id, &conn_param);
>      if (ret) {
> @@ -3681,6 +3725,10 @@ static void rdma_accept_incoming_migration(void *opaque)
>  
>      trace_qemu_rdma_accept_incoming_migration_accepted();
>  
> +    if (rdma->is_return_path) {
> +        return;
> +    }
> +
>      f = qemu_fopen_rdma(rdma, "rb");
>      if (f == NULL) {
>          ERROR(errp, "could not qemu_fopen_rdma!");
> @@ -3695,7 +3743,7 @@ static void rdma_accept_incoming_migration(void *opaque)
>  void rdma_start_incoming_migration(const char *host_port, Error **errp)
>  {
>      int ret;
> -    RDMAContext *rdma;
> +    RDMAContext *rdma, *rdma_return_path;
>      Error *local_err = NULL;
>  
>      trace_rdma_start_incoming_migration();
> @@ -3722,12 +3770,24 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
>  
>      trace_rdma_start_incoming_migration_after_rdma_listen();
>  
> +    /* initialize the RDMAContext for return path */
> +    if (migrate_postcopy()) {
> +        rdma_return_path = qemu_rdma_data_init(host_port, &local_err);
> +
> +        if (rdma_return_path == NULL) {
> +            goto err;
> +        }
> +
> +        qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
> +    }
> +
>      qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
>                          NULL, (void *)(intptr_t)rdma);
>      return;
>  err:
>      error_propagate(errp, local_err);
>      g_free(rdma);
> +    g_free(rdma_return_path);
>  }
>  
>  void rdma_start_outgoing_migration(void *opaque,
> @@ -3735,6 +3795,7 @@ void rdma_start_outgoing_migration(void *opaque,
>  {
>      MigrationState *s = opaque;
>      RDMAContext *rdma = qemu_rdma_data_init(host_port, errp);
> +    RDMAContext *rdma_return_path = NULL;
>      int ret = 0;
>  
>      if (rdma == NULL) {
> @@ -3755,6 +3816,32 @@ void rdma_start_outgoing_migration(void *opaque,
>          goto err;
>      }
>  
> +    /* RDMA postcopy need a seprate queue pair for return path */
> +    if (migrate_postcopy()) {
> +        rdma_return_path = qemu_rdma_data_init(host_port, errp);
> +
> +        if (rdma_return_path == NULL) {
> +            goto err;
> +        }
> +
> +        ret = qemu_rdma_source_init(rdma_return_path,
> +            s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);
> +
> +        if (ret) {
> +            goto err;
> +        }
> +
> +        ret = qemu_rdma_connect(rdma_return_path, errp);
> +
> +        if (ret) {
> +            goto err;
> +        }
> +
> +        rdma->return_path = rdma_return_path;
> +        rdma_return_path->return_path = rdma;
> +        rdma_return_path->is_return_path = true;
> +    }
> +
>      trace_rdma_start_outgoing_migration_after_rdma_connect();
>  
>      s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
> @@ -3762,4 +3849,5 @@ void rdma_start_outgoing_migration(void *opaque,
>      return;
>  err:
>      g_free(rdma);
> +    g_free(rdma_return_path);
>  }
> -- 
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path
  2018-04-07  8:26 ` [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path Lidong Chen
@ 2018-04-11 17:18   ` Dr. David Alan Gilbert
  2018-04-12  8:28     ` Daniel P. Berrangé
  0 siblings, 1 reply; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-11 17:18 UTC (permalink / raw)
  To: Lidong Chen, berrange; +Cc: quintela, qemu-devel, adido, licq, Lidong Chen

* Lidong Chen (jemmy858585@gmail.com) wrote:
> The default get_return_path function of iochannel does not work for
> RDMA live migration. So add the interface to set get_return_path.
> 
> Signed-off-by: Lidong Chen <lidongchen@tencent.com>

Lets see how Dan wants this done, he knows the channel/file stuff;
to me this feels like it should be adding a member to QIOChannelClass
that gets used by QEMUFile's get_return_path.

(Dan and see next patch)

Dave
> ---
>  migration/qemu-file-channel.c | 12 ++++++++----
>  migration/qemu-file.c         | 10 ++++++++--
>  migration/qemu-file.h         |  2 +-
>  3 files changed, 17 insertions(+), 7 deletions(-)
> 
> diff --git a/migration/qemu-file-channel.c b/migration/qemu-file-channel.c
> index e202d73..d4dd8c4 100644
> --- a/migration/qemu-file-channel.c
> +++ b/migration/qemu-file-channel.c
> @@ -156,7 +156,6 @@ static const QEMUFileOps channel_input_ops = {
>      .close = channel_close,
>      .shut_down = channel_shutdown,
>      .set_blocking = channel_set_blocking,
> -    .get_return_path = channel_get_input_return_path,
>  };
>  
>  
> @@ -165,18 +164,23 @@ static const QEMUFileOps channel_output_ops = {
>      .close = channel_close,
>      .shut_down = channel_shutdown,
>      .set_blocking = channel_set_blocking,
> -    .get_return_path = channel_get_output_return_path,
>  };
>  
>  
>  QEMUFile *qemu_fopen_channel_input(QIOChannel *ioc)
>  {
> +    QEMUFile *f;
>      object_ref(OBJECT(ioc));
> -    return qemu_fopen_ops(ioc, &channel_input_ops);
> +    f = qemu_fopen_ops(ioc, &channel_input_ops);
> +    qemu_file_set_return_path(f, channel_get_input_return_path);
> +    return f;
>  }
>  
>  QEMUFile *qemu_fopen_channel_output(QIOChannel *ioc)
>  {
> +    QEMUFile *f;
>      object_ref(OBJECT(ioc));
> -    return qemu_fopen_ops(ioc, &channel_output_ops);
> +    f = qemu_fopen_ops(ioc, &channel_output_ops);
> +    qemu_file_set_return_path(f, channel_get_output_return_path);
> +    return f;
>  }
> diff --git a/migration/qemu-file.c b/migration/qemu-file.c
> index bb63c77..8acb574 100644
> --- a/migration/qemu-file.c
> +++ b/migration/qemu-file.c
> @@ -36,6 +36,7 @@
>  struct QEMUFile {
>      const QEMUFileOps *ops;
>      const QEMUFileHooks *hooks;
> +    QEMURetPathFunc *get_return_path;
>      void *opaque;
>  
>      int64_t bytes_xfer;
> @@ -72,10 +73,15 @@ int qemu_file_shutdown(QEMUFile *f)
>   */
>  QEMUFile *qemu_file_get_return_path(QEMUFile *f)
>  {
> -    if (!f->ops->get_return_path) {
> +    if (!f->get_return_path) {
>          return NULL;
>      }
> -    return f->ops->get_return_path(f->opaque);
> +    return f->get_return_path(f->opaque);
> +}
> +
> +void qemu_file_set_return_path(QEMUFile *f, QEMURetPathFunc *get_return_path)
> +{
> +    f->get_return_path = get_return_path;
>  }
>  
>  bool qemu_file_mode_is_not_valid(const char *mode)
> diff --git a/migration/qemu-file.h b/migration/qemu-file.h
> index f4f356a..74210b7 100644
> --- a/migration/qemu-file.h
> +++ b/migration/qemu-file.h
> @@ -102,7 +102,6 @@ typedef struct QEMUFileOps {
>      QEMUFileCloseFunc *close;
>      QEMUFileSetBlocking *set_blocking;
>      QEMUFileWritevBufferFunc *writev_buffer;
> -    QEMURetPathFunc *get_return_path;
>      QEMUFileShutdownFunc *shut_down;
>  } QEMUFileOps;
>  
> @@ -114,6 +113,7 @@ typedef struct QEMUFileHooks {
>  } QEMUFileHooks;
>  
>  QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops);
> +void qemu_file_set_return_path(QEMUFile *f, QEMURetPathFunc *get_return_path);
>  void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks);
>  int qemu_get_fd(QEMUFile *f);
>  int qemu_fclose(QEMUFile *f);
> -- 
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration
  2018-04-11 12:29 ` Dr. David Alan Gilbert
@ 2018-04-12  3:57   ` 858585 jemmy
  2018-04-24 13:36     ` Dr. David Alan Gilbert
  0 siblings, 1 reply; 20+ messages in thread
From: 858585 jemmy @ 2018-04-12  3:57 UTC (permalink / raw)
  To: Dr. David Alan Gilbert
  Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen, Gal Shachaf,
	Aviad Yehezkel

On Wed, Apr 11, 2018 at 8:29 PM, Dr. David Alan Gilbert
<dgilbert@redhat.com> wrote:
> * Lidong Chen (jemmy858585@gmail.com) wrote:
>> Current Qemu RDMA communication does not support send and receive
>> data at the same time, so when RDMA live migration with postcopy
>> enabled, the source qemu return path thread get qemu file error.
>>
>> Those patch add the postcopy support for RDMA live migration.
>
> This description is a little misleading; it doesn't really
> do RDMA during the postcopy phase - what it really does is disable
> the RDMA page sending during the postcopy phase, relying on the
> RDMA code's stream emulation to send the page.

Hi Dave:
    I will modify the description in next version patch.

>
> That's not necessarily a bad fix; you get the nice performance of RDMA
> during the precopy phase, but how bad are you finding the performance
> during the postcopy phase - the RDMA code we have was only really
> designed for sending small commands over the stream?

I have not finished the performance test. There are three choices for RDMA
migration during the postcopy phase.

1. RDMA SEND operation from the source qemu
2. RDMA Write with Immediate from the source qemu
3. RDMA READ from the destination qemu

In theory, RDMA READ from the destination qemu is the best way.
But I think it's better to make choice base on the performance result.
I will send the performance result later.

If we use another way during the postcopy phase, it will be a big change to the code.
This patch just makes postcopy work, and I will send another patch to
improve the performance.

Thanks.

>
> Dave
>
>> Lidong Chen (5):
>>   migration: create a dedicated connection for rdma return path
>>   migration: add the interface to set get_return_path
>>   migration: implement the get_return_path for RDMA iochannel
>>   migration: fix qemu carsh when RDMA live migration
>>   migration: disable RDMA WRITR after postcopy started.
>>
>>  migration/qemu-file-channel.c |  12 ++--
>>  migration/qemu-file.c         |  13 +++-
>>  migration/qemu-file.h         |   2 +-
>>  migration/rdma.c              | 148 ++++++++++++++++++++++++++++++++++++++++--
>>  4 files changed, 163 insertions(+), 12 deletions(-)
>>
>> --
>> 1.8.3.1
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started.
  2018-04-11 15:56   ` Dr. David Alan Gilbert
@ 2018-04-12  6:50     ` 858585 jemmy
  2018-04-12 18:55       ` Dr. David Alan Gilbert
  0 siblings, 1 reply; 20+ messages in thread
From: 858585 jemmy @ 2018-04-12  6:50 UTC (permalink / raw)
  To: Dr. David Alan Gilbert
  Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen, Gal Shachaf,
	Aviad Yehezkel

On Wed, Apr 11, 2018 at 11:56 PM, Dr. David Alan Gilbert
<dgilbert@redhat.com> wrote:
> * Lidong Chen (jemmy858585@gmail.com) wrote:
>> RDMA write operations are performed with no notification to the destination
>> qemu, then the destination qemu can not wakeup. So disable RDMA WRITE after
>> postcopy started.
>>
>> Signed-off-by: Lidong Chen <lidongchen@tencent.com>
>
> This patch needs to be near the beginning of the series; at the moment a
> bisect would lead you to the middle of the series which had return
> paths, but then would fail to work properly because it would try and use
> the RDMA code.

I will fix this problem in next version.

>
>> ---
>>  migration/qemu-file.c |  3 ++-
>>  migration/rdma.c      | 12 ++++++++++++
>>  2 files changed, 14 insertions(+), 1 deletion(-)
>>
>> diff --git a/migration/qemu-file.c b/migration/qemu-file.c
>> index 8acb574..a64ac3a 100644
>> --- a/migration/qemu-file.c
>> +++ b/migration/qemu-file.c
>> @@ -260,7 +260,8 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
>>          int ret = f->hooks->save_page(f, f->opaque, block_offset,
>>                                        offset, size, bytes_sent);
>>          f->bytes_xfer += size;
>> -        if (ret != RAM_SAVE_CONTROL_DELAYED) {
>> +        if (ret != RAM_SAVE_CONTROL_DELAYED &&
>> +            ret != RAM_SAVE_CONTROL_NOT_SUPP) {
>
> What about f->bytes_xfer in this case?

f->bytes_xfer should not update when RAM_SAVE_CONTROL_NOT_SUPP.
I will fix this problem in next version.

>
> Is there anything we have to do at the switchover into postcopy to make
> sure that all pages have been received?

ram_save_iterate invokes ram_control_after_iterate(f, RAM_CONTROL_ROUND),
so before the next iteration, which switches over into postcopy, all the
pages sent by the previous iteration have been received.

>
> Dave
>
>>              if (bytes_sent && *bytes_sent > 0) {
>>                  qemu_update_position(f, *bytes_sent);
>>              } else if (ret < 0) {
>> diff --git a/migration/rdma.c b/migration/rdma.c
>> index 81be482..8529ddd 100644
>> --- a/migration/rdma.c
>> +++ b/migration/rdma.c
>> @@ -2964,6 +2964,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
>>
>>      CHECK_ERROR_STATE();
>>
>> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
>> +        return RAM_SAVE_CONTROL_NOT_SUPP;
>> +    }
>> +
>>      qemu_fflush(f);
>>
>>      if (size > 0) {
>> @@ -3528,6 +3532,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
>>
>>      CHECK_ERROR_STATE();
>>
>> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
>> +        return 0;
>> +    }
>> +
>>      trace_qemu_rdma_registration_start(flags);
>>      qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
>>      qemu_fflush(f);
>> @@ -3550,6 +3558,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
>>
>>      CHECK_ERROR_STATE();
>>
>> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
>> +        return 0;
>> +    }
>> +
>>      qemu_fflush(f);
>>      ret = qemu_rdma_drain_cq(f, rdma);
>>
>> --
>> 1.8.3.1
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path
  2018-04-11 17:18   ` Dr. David Alan Gilbert
@ 2018-04-12  8:28     ` Daniel P. Berrangé
  2018-04-12 10:08       ` 858585 jemmy
  0 siblings, 1 reply; 20+ messages in thread
From: Daniel P. Berrangé @ 2018-04-12  8:28 UTC (permalink / raw)
  To: Dr. David Alan Gilbert
  Cc: Lidong Chen, quintela, qemu-devel, adido, licq, Lidong Chen

On Wed, Apr 11, 2018 at 06:18:18PM +0100, Dr. David Alan Gilbert wrote:
> * Lidong Chen (jemmy858585@gmail.com) wrote:
> > The default get_return_path function of iochannel does not work for
> > RDMA live migration. So add the interface to set get_return_path.
> > 
> > Signed-off-by: Lidong Chen <lidongchen@tencent.com>
> 
> Lets see how Dan wants this done, he knows the channel/file stuff;
> to me this feels like it should be adding a member to QIOChannelClass
> that gets used by QEMUFile's get_return_path.

No that doesn't really fit the model. IMHO the entire concept of a separate
return path object is really wrong. The QIOChannel implementations are
(almost) all capable of bi-directional I/O, which is why the get_return_path
function just creates a second QEMUFile pointing to the same QIOChannel
object we already had. Migration only needs the second QEMUFile, because that
struct re-uses the same struct fields for tracking different bits of info
depending on which direction you're doing I/O in. A real fix would be to
stop overloading the same fields for multiple purposes in the QEMUFile, so
that we only needed a single QEMUFile instance.

Ignoring that though, the particular problem we're facing here is that the
QIOChannelRDMA impl that is used is not written in a way that allows
bi-directional I/O, despite the RDMA code it uses being capable of it.

So rather than changing this get_return_path code, IMHO, the right fix to
simply improve the QIOChannelRDMA impl so that it fully supports bi-directional
I/O like all the other channels do.

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration
  2018-04-11 16:43   ` Dr. David Alan Gilbert
@ 2018-04-12  9:40     ` 858585 jemmy
  2018-04-12 18:58       ` Dr. David Alan Gilbert
  0 siblings, 1 reply; 20+ messages in thread
From: 858585 jemmy @ 2018-04-12  9:40 UTC (permalink / raw)
  To: Dr. David Alan Gilbert
  Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen

On Thu, Apr 12, 2018 at 12:43 AM, Dr. David Alan Gilbert
<dgilbert@redhat.com> wrote:
> * Lidong Chen (jemmy858585@gmail.com) wrote:
>> After postcopy, the destination qemu work in the dedicated
>> thread, so only invoke yield_until_fd_readable before postcopy
>> migration.
>
> The subject line needs to be more discriptive:
>    migration: Stop rdma yielding during incoming postcopy
>
> I think.
> (Also please check the subject spellings)
>
>> Signed-off-by: Lidong Chen <lidongchen@tencent.com>
>> ---
>>  migration/rdma.c | 4 +++-
>>  1 file changed, 3 insertions(+), 1 deletion(-)
>>
>> diff --git a/migration/rdma.c b/migration/rdma.c
>> index 53773c7..81be482 100644
>> --- a/migration/rdma.c
>> +++ b/migration/rdma.c
>> @@ -1489,11 +1489,13 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
>>       * Coroutine doesn't start until migration_fd_process_incoming()
>>       * so don't yield unless we know we're running inside of a coroutine.
>>       */
>> -    if (rdma->migration_started_on_destination) {
>> +    if (rdma->migration_started_on_destination &&
>> +        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
>
> OK, that's a bit delicate; watch if it ever gets called in a failure
> case or similar - and also wathc out if we make more use of the status
> on the destination, but otherwise, and with a fix for the subject;

How about use migration_incoming_get_current()->have_listen_thread?

    if (rdma->migration_started_on_destination &&
        migration_incoming_get_current()->have_listen_thread == false) {
        yield_until_fd_readable(rdma->comp_channel->fd);
    }

>
>
> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
>
>>          yield_until_fd_readable(rdma->comp_channel->fd);
>>      } else {
>>          /* This is the source side, we're in a separate thread
>>           * or destination prior to migration_fd_process_incoming()
>> +         * after postcopy, the destination also in a seprate thread.
>>           * we can't yield; so we have to poll the fd.
>>           * But we need to be able to handle 'cancel' or an error
>>           * without hanging forever.
>> --
>> 1.8.3.1
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path
  2018-04-12  8:28     ` Daniel P. Berrangé
@ 2018-04-12 10:08       ` 858585 jemmy
  0 siblings, 0 replies; 20+ messages in thread
From: 858585 jemmy @ 2018-04-12 10:08 UTC (permalink / raw)
  To: Daniel P. Berrangé
  Cc: Dr. David Alan Gilbert, Juan Quintela, qemu-devel, adido, licq,
	Lidong Chen

On Thu, Apr 12, 2018 at 4:28 PM, Daniel P. Berrangé <berrange@redhat.com> wrote:
> On Wed, Apr 11, 2018 at 06:18:18PM +0100, Dr. David Alan Gilbert wrote:
>> * Lidong Chen (jemmy858585@gmail.com) wrote:
>> > The default get_return_path function of iochannel does not work for
>> > RDMA live migration. So add the interface to set get_return_path.
>> >
>> > Signed-off-by: Lidong Chen <lidongchen@tencent.com>
>>
>> Lets see how Dan wants this done, he knows the channel/file stuff;
>> to me this feels like it should be adding a member to QIOChannelClass
>> that gets used by QEMUFile's get_return_path.
>
> No that doesn't really fit the model. IMHO the entire concept of a separate
> return path object is really wrong. The QIOChannel implementations are
> (almost) all capable of bi-directional I/O, which is why the the get_retun_path
> function just creates a second QEMUFile pointing to the same QIOChannel
> object we already had. Migration only needs the second QEMUFile, because that
> struct re-uses the same struct fields for tracking different bits of info
> depending on which direction you're doing I/O in. A real fix would be to
> stop overloading the same fields for multiple purposes in the QEMUFile, so
> that we only needed a single QEMUFile instance.
>
> Ignoring that though, the particular problem we're facing here is that the
> QIOChannelRDMA impl that is used is not written in a way that allows
> bi-directional I/O, despite the RDMA code it uses being capable of it.
>
> So rather than changing this get_return_path code, IMHO, the right fix to
> simply improve the QIOChannelRDMA impl so that it fully supports bi-directional
> I/O like all the other channels do.

Hi Daniel:
     Thanks for your suggestion.
     I will have a try.

>
> Regards,
> Daniel
> --
> |: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
> |: https://libvirt.org         -o-            https://fstop138.berrange.com :|
> |: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITR after postcopy started.
  2018-04-12  6:50     ` 858585 jemmy
@ 2018-04-12 18:55       ` Dr. David Alan Gilbert
  0 siblings, 0 replies; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-12 18:55 UTC (permalink / raw)
  To: 858585 jemmy
  Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen, Gal Shachaf,
	Aviad Yehezkel

* 858585 jemmy (jemmy858585@gmail.com) wrote:
> On Wed, Apr 11, 2018 at 11:56 PM, Dr. David Alan Gilbert
> <dgilbert@redhat.com> wrote:
> > * Lidong Chen (jemmy858585@gmail.com) wrote:
> >> RDMA write operations are performed with no notification to the destination
> >> qemu, then the destination qemu can not wakeup. So disable RDMA WRITE after
> >> postcopy started.
> >>
> >> Signed-off-by: Lidong Chen <lidongchen@tencent.com>
> >
> > This patch needs to be near the beginning of the series; at the moment a
> > bisect would lead you to the middle of the series which had return
> > paths, but then would fail to work properly because it would try and use
> > the RDMA code.
> 
> I will fix this problem in next version.
> 
> >
> >> ---
> >>  migration/qemu-file.c |  3 ++-
> >>  migration/rdma.c      | 12 ++++++++++++
> >>  2 files changed, 14 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/migration/qemu-file.c b/migration/qemu-file.c
> >> index 8acb574..a64ac3a 100644
> >> --- a/migration/qemu-file.c
> >> +++ b/migration/qemu-file.c
> >> @@ -260,7 +260,8 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
> >>          int ret = f->hooks->save_page(f, f->opaque, block_offset,
> >>                                        offset, size, bytes_sent);
> >>          f->bytes_xfer += size;
> >> -        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> >> +        if (ret != RAM_SAVE_CONTROL_DELAYED &&
> >> +            ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> >
> > What about f->bytes_xfer in this case?
> 
> f->bytes_xfer should not update when RAM_SAVE_CONTROL_NOT_SUPP.
> I will fix this problem in next version.
> 
> >
> > Is there anything we have to do at the switchover into postcopy to make
> > sure that all pages have been received?
> 
> ram_save_iterate invoke ram_control_after_iterate(f, RAM_CONTROL_ROUND),
> so before next iteration which switchover into postcopy, all the pages
> sent by previous
> iteration have been received.

OK, great.

Dave

> >
> > Dave
> >
> >>              if (bytes_sent && *bytes_sent > 0) {
> >>                  qemu_update_position(f, *bytes_sent);
> >>              } else if (ret < 0) {
> >> diff --git a/migration/rdma.c b/migration/rdma.c
> >> index 81be482..8529ddd 100644
> >> --- a/migration/rdma.c
> >> +++ b/migration/rdma.c
> >> @@ -2964,6 +2964,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
> >>
> >>      CHECK_ERROR_STATE();
> >>
> >> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> >> +        return RAM_SAVE_CONTROL_NOT_SUPP;
> >> +    }
> >> +
> >>      qemu_fflush(f);
> >>
> >>      if (size > 0) {
> >> @@ -3528,6 +3532,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
> >>
> >>      CHECK_ERROR_STATE();
> >>
> >> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> >> +        return 0;
> >> +    }
> >> +
> >>      trace_qemu_rdma_registration_start(flags);
> >>      qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
> >>      qemu_fflush(f);
> >> @@ -3550,6 +3558,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
> >>
> >>      CHECK_ERROR_STATE();
> >>
> >> +    if (migrate_get_current()->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
> >> +        return 0;
> >> +    }
> >> +
> >>      qemu_fflush(f);
> >>      ret = qemu_rdma_drain_cq(f, rdma);
> >>
> >> --
> >> 1.8.3.1
> >>
> > --
> > Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 4/5] migration: fix qemu carsh when RDMA live migration
  2018-04-12  9:40     ` 858585 jemmy
@ 2018-04-12 18:58       ` Dr. David Alan Gilbert
  0 siblings, 0 replies; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-12 18:58 UTC (permalink / raw)
  To: 858585 jemmy; +Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen

* 858585 jemmy (jemmy858585@gmail.com) wrote:
> On Thu, Apr 12, 2018 at 12:43 AM, Dr. David Alan Gilbert
> <dgilbert@redhat.com> wrote:
> > * Lidong Chen (jemmy858585@gmail.com) wrote:
> >> After postcopy, the destination qemu work in the dedicated
> >> thread, so only invoke yield_until_fd_readable before postcopy
> >> migration.
> >
> > The subject line needs to be more discriptive:
> >    migration: Stop rdma yielding during incoming postcopy
> >
> > I think.
> > (Also please check the subject spellings)
> >
> >> Signed-off-by: Lidong Chen <lidongchen@tencent.com>
> >> ---
> >>  migration/rdma.c | 4 +++-
> >>  1 file changed, 3 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/migration/rdma.c b/migration/rdma.c
> >> index 53773c7..81be482 100644
> >> --- a/migration/rdma.c
> >> +++ b/migration/rdma.c
> >> @@ -1489,11 +1489,13 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
> >>       * Coroutine doesn't start until migration_fd_process_incoming()
> >>       * so don't yield unless we know we're running inside of a coroutine.
> >>       */
> >> -    if (rdma->migration_started_on_destination) {
> >> +    if (rdma->migration_started_on_destination &&
> >> +        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
> >
> > OK, that's a bit delicate; watch if it ever gets called in a failure
> > case or similar - and also wathc out if we make more use of the status
> > on the destination, but otherwise, and with a fix for the subject;
> 
> How about use migration_incoming_get_current()->have_listen_thread?

That's supposed to be pretty internal to the postcopy code, so I prefer
the status check.

Dave

>     if (rdma->migration_started_on_destination &&
>         migration_incoming_get_current()->have_listen_thread == false) {
>         yield_until_fd_readable(rdma->comp_channel->fd);
>     }
> 
> >
> >
> > Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> >
> >>          yield_until_fd_readable(rdma->comp_channel->fd);
> >>      } else {
> >>          /* This is the source side, we're in a separate thread
> >>           * or destination prior to migration_fd_process_incoming()
> >> +         * after postcopy, the destination also in a seprate thread.
> >>           * we can't yield; so we have to poll the fd.
> >>           * But we need to be able to handle 'cancel' or an error
> >>           * without hanging forever.
> >> --
> >> 1.8.3.1
> >>
> > --
> > Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration
  2018-04-12  3:57   ` 858585 jemmy
@ 2018-04-24 13:36     ` Dr. David Alan Gilbert
  0 siblings, 0 replies; 20+ messages in thread
From: Dr. David Alan Gilbert @ 2018-04-24 13:36 UTC (permalink / raw)
  To: 858585 jemmy
  Cc: Juan Quintela, qemu-devel, adido, licq, Lidong Chen, Gal Shachaf,
	Aviad Yehezkel

* 858585 jemmy (jemmy858585@gmail.com) wrote:
> On Wed, Apr 11, 2018 at 8:29 PM, Dr. David Alan Gilbert
> <dgilbert@redhat.com> wrote:
> > * Lidong Chen (jemmy858585@gmail.com) wrote:
> >> Current Qemu RDMA communication does not support send and receive
> >> data at the same time, so when RDMA live migration with postcopy
> >> enabled, the source qemu return path thread gets a qemu file error.
> >>
> >> Those patch add the postcopy support for RDMA live migration.
> >
> > This description is a little misleading; it doesn't really
> > do RDMA during the postcopy phase - what it really does is disable
> > the RDMA page sending during the postcopy phase, relying on the
> > RDMA codes stream emulation to send the page.
> 
> Hi Dave:
>     I will modify the description in next version patch.
> 
> >
> > That's not necessarily a bad fix; you get the nice performance of RDMA
> > during the precopy phase, but how bad are you finding the performance
> > during the postcopy phase - the RDMA code we have was only really
> > designed for sending small commands over the stream?
> 
> I have not finished the performance test. There are three choices for RDMA
> migration during the postcopy phase.
> 
> 1. RDMA SEND operation from the source qemu
> 2. RDMA Write with Immediate from the source qemu
> 3. RDMA READ from the destination qemu
> 
> In theory, RDMA READ from the destination qemu is the best way.
> But I think it's better to make choice base on the performance result.
> I will send the performance result later.

An RDMA read certainly sounds like an interesting way for postcopy,
since it means the destination would be in control; so it can RDMA into
temporaries that it could then atomically place.  An interesting
thought.

> If we use another way during the postcopy phase, it will be a big change to the code.
> This patch just make postcopy works, and i will send another patch to
> improve the performance.

Sure; I just wanted to check that, because the existing code wasn't
designed for sending pages, that it wasn't really terribly slow.

Dave

> Thanks.
> 
> >
> > Dave
> >
> >> Lidong Chen (5):
> >>   migration: create a dedicated connection for rdma return path
> >>   migration: add the interface to set get_return_path
> >>   migration: implement the get_return_path for RDMA iochannel
> >>   migration: fix qemu crash when RDMA live migration
> >>   migration: disable RDMA WRITE after postcopy started.
> >>
> >>  migration/qemu-file-channel.c |  12 ++--
> >>  migration/qemu-file.c         |  13 +++-
> >>  migration/qemu-file.h         |   2 +-
> >>  migration/rdma.c              | 148 ++++++++++++++++++++++++++++++++++++++++--
> >>  4 files changed, 163 insertions(+), 12 deletions(-)
> >>
> >> --
> >> 1.8.3.1
> >>
> > --
> > Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2018-04-24 13:37 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-07  8:26 [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration Lidong Chen
2018-04-07  8:26 ` [Qemu-devel] [PATCH 1/5] migration: create a dedicated connection for rdma return path Lidong Chen
2018-04-11 16:57   ` Dr. David Alan Gilbert
2018-04-07  8:26 ` [Qemu-devel] [PATCH 2/5] migration: add the interface to set get_return_path Lidong Chen
2018-04-11 17:18   ` Dr. David Alan Gilbert
2018-04-12  8:28     ` Daniel P. Berrangé
2018-04-12 10:08       ` 858585 jemmy
2018-04-07  8:26 ` [Qemu-devel] [PATCH 3/5] migration: implement the get_return_path for RDMA iochannel Lidong Chen
2018-04-07  8:26 ` [Qemu-devel] [PATCH 4/5] migration: fix qemu crash when RDMA live migration Lidong Chen
2018-04-11 16:43   ` Dr. David Alan Gilbert
2018-04-12  9:40     ` 858585 jemmy
2018-04-12 18:58       ` Dr. David Alan Gilbert
2018-04-07  8:26 ` [Qemu-devel] [PATCH 5/5] migration: disable RDMA WRITE after postcopy started Lidong Chen
2018-04-11 15:56   ` Dr. David Alan Gilbert
2018-04-12  6:50     ` 858585 jemmy
2018-04-12 18:55       ` Dr. David Alan Gilbert
2018-04-09  1:05 ` [Qemu-devel] [PATCH 0/5] Enable postcopy RDMA live migration 858585 jemmy
2018-04-11 12:29 ` Dr. David Alan Gilbert
2018-04-12  3:57   ` 858585 jemmy
2018-04-24 13:36     ` Dr. David Alan Gilbert

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.