From: Chuan Zheng <zhengchuan@huawei.com>
To: <quintela@redhat.com>, <dgilbert@redhat.com>
Cc: yubihong@huawei.com, zhang.zhanghailiang@huawei.com,
	fengzhimin1@huawei.com, qemu-devel@nongnu.org,
	xiexiangyou@huawei.com, alex.chen@huawei.com,
	wanghao232@huawei.com
Subject: [PATCH v3 10/18] migration/rdma: Create the multifd recv channels for RDMA
Date: Sat, 17 Oct 2020 12:25:40 +0800	[thread overview]
Message-ID: <1602908748-43335-11-git-send-email-zhengchuan@huawei.com> (raw)
In-Reply-To: <1602908748-43335-1-git-send-email-zhengchuan@huawei.com>

We still don't transmit anything through them; this patch only establishes
the RDMA connections for the multifd receive channels.

Signed-off-by: Zhimin Feng <fengzhimin1@huawei.com>
Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
---
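Note for reviewers: a minimal recipe to exercise the new receive channels,
assuming the whole series is applied on both sides (the address, port and
channel count below are only examples):

    # destination
    qemu-system-x86_64 ... -incoming rdma:0.0.0.0:4444

    # source monitor (HMP)
    (qemu) migrate_set_capability multifd on
    (qemu) migrate_set_parameter multifd-channels 4
    (qemu) migrate -d rdma:<dest-ip>:4444

Each incoming connection lands in rdma_accept_incoming_migration(); the new
qemu_rdma_accept_setup() below then re-arms the fd handler for the first
multifd recv channel whose cm_id is still unset, and falls back to
rdma_cm_poll_handler() once all channels are connected.
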
 migration/rdma.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 68 insertions(+), 2 deletions(-)

diff --git a/migration/rdma.c b/migration/rdma.c
index 2baa933..63559f1 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3266,6 +3266,40 @@ static void rdma_cm_poll_handler(void *opaque)
     }
 }
 
+static bool qemu_rdma_accept_setup(RDMAContext *rdma)
+{
+    RDMAContext *multifd_rdma = NULL;
+    int thread_count;
+    int i;
+    MultiFDRecvParams *multifd_recv_param;
+    thread_count = migrate_multifd_channels();
+    /* create the multifd channels for RDMA */
+    for (i = 0; i < thread_count; i++) {
+        if (get_multifd_recv_param(i, &multifd_recv_param) < 0) {
+            error_report("rdma: error getting multifd_recv_param(%d)", i);
+            return false;
+        }
+
+        multifd_rdma = (RDMAContext *) multifd_recv_param->rdma;
+        if (multifd_rdma->cm_id == NULL) {
+            break;
+        } else {
+            multifd_rdma = NULL;
+        }
+    }
+
+    if (multifd_rdma) {
+        qemu_set_fd_handler(rdma->channel->fd,
+                            rdma_accept_incoming_migration,
+                            NULL, (void *)(intptr_t)multifd_rdma);
+    } else {
+        qemu_set_fd_handler(rdma->channel->fd, rdma_cm_poll_handler,
+                            NULL, rdma);
+    }
+
+    return true;
+}
+
 static int qemu_rdma_accept(RDMAContext *rdma)
 {
     RDMACapabilities cap;
@@ -3365,6 +3399,10 @@ static int qemu_rdma_accept(RDMAContext *rdma)
         qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                             NULL,
                             (void *)(intptr_t)rdma->return_path);
+    } else if (migrate_use_multifd()) {
+        if (!qemu_rdma_accept_setup(rdma)) {
+            goto err_rdma_dest_wait;
+        }
     } else {
         qemu_set_fd_handler(rdma->channel->fd, rdma_cm_poll_handler,
                             NULL, rdma);
@@ -3975,6 +4013,35 @@ static QEMUFile *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
     return rioc->file;
 }
 
+static void migration_rdma_process_incoming(QEMUFile *f,
+                                            RDMAContext *rdma, Error **errp)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    QIOChannel *ioc = NULL;
+    bool start_migration = false;
+
+    /* FIXME: Need refactor */
+    if (!migrate_use_multifd()) {
+        rdma->migration_started_on_destination = 1;
+        migration_fd_process_incoming(f, errp);
+        return;
+    }
+
+    if (!mis->from_src_file) {
+        mis->from_src_file = f;
+        qemu_file_set_blocking(f, false);
+    } else {
+        ioc = QIO_CHANNEL(getQIOChannel(f));
+        /* Multiple connections */
+        assert(migrate_use_multifd());
+        start_migration = multifd_recv_new_channel(ioc, errp);
+    }
+
+    if (start_migration) {
+        migration_incoming_process();
+    }
+}
+
 static void rdma_accept_incoming_migration(void *opaque)
 {
     RDMAContext *rdma = opaque;
@@ -4003,8 +4070,7 @@ static void rdma_accept_incoming_migration(void *opaque)
         return;
     }
 
-    rdma->migration_started_on_destination = 1;
-    migration_fd_process_incoming(f, &local_err);
+    migration_rdma_process_incoming(f, rdma, &local_err);
     if (local_err) {
         error_reportf_err(local_err, "RDMA ERROR:");
     }
-- 
1.8.3.1



Thread overview: 31+ messages
2020-10-17  4:25 [PATCH v3 00/18] Support Multifd for RDMA migration Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 01/18] migration/rdma: add the 'migrate_use_rdma_pin_all' function Chuan Zheng
2020-11-10 11:52   ` Dr. David Alan Gilbert
2020-10-17  4:25 ` [PATCH v3 02/18] migration/rdma: judge whether or not the RDMA is used for migration Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 03/18] migration/rdma: create multifd_setup_ops for Tx/Rx thread Chuan Zheng
2020-11-10 12:11   ` Dr. David Alan Gilbert
2020-11-11  7:51     ` Zheng Chuan
2020-10-17  4:25 ` [PATCH v3 04/18] migration/rdma: add multifd_setup_ops for rdma Chuan Zheng
2020-11-10 12:30   ` Dr. David Alan Gilbert
2020-11-11  7:56     ` Zheng Chuan
2020-10-17  4:25 ` [PATCH v3 05/18] migration/rdma: do not need sync main " Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 06/18] migration/rdma: export MultiFDSendParams/MultiFDRecvParams Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 07/18] migration/rdma: add rdma field into multifd send/recv param Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 08/18] migration/rdma: export getQIOChannel to get QIOchannel in rdma Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 09/18] migration/rdma: add multifd_rdma_load_setup() to setup multifd rdma Chuan Zheng
2020-11-10 16:51   ` Dr. David Alan Gilbert
2020-10-17  4:25 ` Chuan Zheng [this message]
2020-10-17  4:25 ` [PATCH v3 11/18] migration/rdma: record host_port for multifd RDMA Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 12/18] migration/rdma: Create the multifd send channels for RDMA Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 13/18] migration/rdma: Add the function for dynamic page registration Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 14/18] migration/rdma: register memory for multifd RDMA channels Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 15/18] migration/rdma: only register the memory for multifd channels Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 16/18] migration/rdma: add rdma_channel into Migrationstate field Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 17/18] migration/rdma: send data for both rdma-pin-all and NOT rdma-pin-all mode Chuan Zheng
2020-10-17  4:25 ` [PATCH v3 18/18] migration/rdma: RDMA cleanup for multifd migration Chuan Zheng
2020-10-21  9:25 ` [PATCH v3 00/18] Support Multifd for RDMA migration Zhanghailiang
2020-10-21  9:33   ` Zheng Chuan
2020-10-23 19:02     ` Dr. David Alan Gilbert
2020-10-25  2:29       ` Zheng Chuan
2020-12-15  7:28         ` Zheng Chuan
2020-12-18 20:01           ` Dr. David Alan Gilbert
