From: Zhimin Feng <fengzhimin1@huawei.com>
To: <quintela@redhat.com>, <dgilbert@redhat.com>, <armbru@redhat.com>,
	<eblake@redhat.com>
Cc: jemmy858585@gmail.com, Zhimin Feng <fengzhimin1@huawei.com>,
	qemu-devel@nongnu.org, zhang.zhanghailiang@huawei.com
Subject: [PATCH RFC 09/14] migration/rdma: register memory for multifd RDMA channels
Date: Thu, 13 Feb 2020 17:37:50 +0800
Message-ID: <20200213093755.370-10-fengzhimin1@huawei.com>
In-Reply-To: <20200213093755.370-1-fengzhimin1@huawei.com>

Register memory for the multifd RDMA channels and transmit the
destination's keys, including the virtual addresses, back to the source
for use in its RDMA writes.

Signed-off-by: Zhimin Feng <fengzhimin1@huawei.com>
---
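Note for reviewers: at the verbs level, the per-channel registration on
the destination amounts to an ibv_reg_mr() per memory block, whose rkey
and base virtual address then travel back over the control channel so
the source can target its RDMA writes. A minimal standalone sketch of
that step follows; register_and_advertise() and the printf stand in for
the control-channel message and are illustrative, not QEMU APIs:

#include <infiniband/verbs.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Sketch: register one memory block and report the two values the
 * source needs.  The source addresses its RDMA_WRITEs as
 * remote_addr = base + offset, authenticated by rkey, so both values
 * must be sent back over the control channel.
 */
static int register_and_advertise(struct ibv_pd *pd, void *base, size_t len)
{
    struct ibv_mr *mr = ibv_reg_mr(pd, base, len,
                                   IBV_ACCESS_LOCAL_WRITE |
                                   IBV_ACCESS_REMOTE_WRITE);
    if (!mr) {
        perror("ibv_reg_mr");
        return -1;
    }
    printf("advertise: addr=%p rkey=0x%x\n", base, mr->rkey);
    return 0;
}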
 migration/multifd.c | 34 +++++++++++++++++++++++++++++---
 migration/rdma.c    | 48 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+), 3 deletions(-)
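The thread coordination added below is a plain semaphore handshake: the
migration thread posts sem_sync once per channel to kick off memory
registration, once more per round to trigger the REGISTER_FINISHED
exchange, and a final post with p->quit set to stop the thread. The same
pattern as a self-contained sketch with POSIX semaphores; the printf
bodies stand in for qemu_rdma_registration() / qemu_rdma_exchange_send():

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    sem_t sem_sync;
    pthread_mutex_t mutex;
    bool quit;
    int id;
} Channel;

static void *channel_thread(void *opaque)
{
    Channel *c = opaque;

    sem_wait(&c->sem_sync);              /* first post: register memory */
    printf("channel %d: register memory\n", c->id);

    for (;;) {
        sem_wait(&c->sem_sync);          /* later posts: one work unit */
        pthread_mutex_lock(&c->mutex);
        bool quit = c->quit;
        pthread_mutex_unlock(&c->mutex);
        if (quit) {
            break;
        }
        printf("channel %d: send REGISTER_FINISHED\n", c->id);
    }
    return NULL;
}

int main(void)
{
    Channel c = { .id = 0 };
    sem_init(&c.sem_sync, 0, 0);
    pthread_mutex_init(&c.mutex, NULL);

    pthread_t t;
    pthread_create(&t, NULL, channel_thread, &c);

    sem_post(&c.sem_sync);               /* trigger registration */
    sem_post(&c.sem_sync);               /* trigger one FINISHED round */

    pthread_mutex_lock(&c.mutex);        /* ask the thread to exit */
    c.quit = true;
    pthread_mutex_unlock(&c.mutex);
    sem_post(&c.sem_sync);

    pthread_join(t, NULL);
    return 0;
}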

diff --git a/migration/multifd.c b/migration/multifd.c
index a57d7a2eab..4ae25fc88f 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -388,7 +388,11 @@ static void multifd_send_terminate_threads(Error *err)
 
         qemu_mutex_lock(&p->mutex);
         p->quit = true;
-        qemu_sem_post(&p->sem);
+        if (migrate_use_rdma()) {
+            qemu_sem_post(&p->sem_sync);
+        } else {
+            qemu_sem_post(&p->sem);
+        }
         qemu_mutex_unlock(&p->mutex);
     }
 }
@@ -484,6 +488,8 @@ static void *multifd_rdma_send_thread(void *opaque)
 {
     MultiFDSendParams *p = opaque;
     Error *local_err = NULL;
+    int ret = 0;
+    RDMAControlHeader head = { .len = 0, .repeat = 1 };
 
     trace_multifd_send_thread_start(p->id);
 
@@ -491,14 +497,28 @@ static void *multifd_rdma_send_thread(void *opaque)
         goto out;
     }
 
+    /* wait for semaphore notification to register memory */
+    qemu_sem_wait(&p->sem_sync);
+    if (qemu_rdma_registration(p->rdma) < 0) {
+        goto out;
+    }
+
     while (true) {
+        qemu_sem_wait(&p->sem_sync);
+
         qemu_mutex_lock(&p->mutex);
         if (p->quit) {
             qemu_mutex_unlock(&p->mutex);
             break;
         }
         qemu_mutex_unlock(&p->mutex);
-        qemu_sem_wait(&p->sem);
+
+        /* Send FINISHED to the destination */
+        head.type = RDMA_CONTROL_REGISTER_FINISHED;
+        ret = qemu_rdma_exchange_send(p->rdma, &head, NULL, NULL, NULL, NULL);
+        if (ret < 0) {
+            goto out;
+        }
     }
 
 out:
@@ -836,15 +856,23 @@ void multifd_recv_sync_main(void)
 static void *multifd_rdma_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    int ret = 0;
 
     while (true) {
+        qemu_sem_wait(&p->sem_sync);
+
         qemu_mutex_lock(&p->mutex);
         if (p->quit) {
             qemu_mutex_unlock(&p->mutex);
             break;
         }
         qemu_mutex_unlock(&p->mutex);
-        qemu_sem_wait(&p->sem_sync);
+
+        ret = qemu_rdma_registration_handle(p->file, p->c);
+        if (ret < 0) {
+            qemu_file_set_error(p->file, ret);
+            break;
+        }
     }
 
     qemu_mutex_lock(&p->mutex);
diff --git a/migration/rdma.c b/migration/rdma.c
index 19a238be30..5de3a29712 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3570,6 +3570,19 @@ static int rdma_load_hook(QEMUFile *f, void *opaque, uint64_t flags, void *data)
         return rdma_block_notification_handle(opaque, data);
 
     case RAM_CONTROL_HOOK:
+        if (migrate_use_multifd()) {
+            int i;
+            MultiFDRecvParams *multifd_recv_param = NULL;
+            int thread_count = migrate_multifd_channels();
+            /* Wake the dest recv threads to handle registration requests */
+            for (i = 0; i < thread_count; i++) {
+                if (get_multifd_recv_param(i, &multifd_recv_param)) {
+                    return -1;
+                }
+                qemu_sem_post(&multifd_recv_param->sem_sync);
+            }
+        }
+
         return qemu_rdma_registration_handle(f, opaque);
 
     default:
@@ -3643,6 +3656,25 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
         head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
         trace_qemu_rdma_registration_stop_ram();
 
+        if (migrate_use_multifd()) {
+            /*
+             * Inform the multifd channels to register memory
+             */
+            int i;
+            int thread_count = migrate_multifd_channels();
+            MultiFDSendParams *multifd_send_param = NULL;
+            for (i = 0; i < thread_count; i++) {
+                ret = get_multifd_send_param(i, &multifd_send_param);
+                if (ret) {
+                    ERROR(errp, "rdma: error getting "
+                                "multifd_send_param(%d)", i);
+                    return ret;
+                }
+
+                qemu_sem_post(&multifd_send_param->sem_sync);
+            }
+        }
+
         /*
          * Make sure that we parallelize the pinning on both sides.
          * For very large guests, doing this serially takes a really
@@ -3708,6 +3740,22 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
     head.type = RDMA_CONTROL_REGISTER_FINISHED;
     ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
 
+    if (migrate_use_multifd()) {
+        /* Inform src send_thread to send FINISHED signal */
+        int i;
+        int thread_count = migrate_multifd_channels();
+        MultiFDSendParams *multifd_send_param = NULL;
+        for (i = 0; i < thread_count; i++) {
+            ret = get_multifd_send_param(i, &multifd_send_param);
+            if (ret) {
+                ERROR(errp, "rdma: error getting multifd_send_param(%d)", i);
+                return ret;
+            }
+
+            qemu_sem_post(&multifd_send_param->sem_sync);
+        }
+    }
+
     if (ret < 0) {
         goto err;
     }
-- 
2.19.1

Thread overview: 22+ messages
2020-02-13  9:37 [PATCH RFC 00/14] *** multifd for RDMA v2 *** Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 01/14] migration: add the 'migrate_use_rdma_pin_all' function Zhimin Feng
2020-02-13 10:02   ` Juan Quintela
2020-02-13  9:37 ` [PATCH RFC 02/14] migration: judge whether or not the RDMA is used for migration Zhimin Feng
2020-02-13 10:04   ` Juan Quintela
2020-02-13  9:37 ` [PATCH RFC 03/14] migration/rdma: Create multiFd migration threads Zhimin Feng
2020-02-13 10:12   ` Juan Quintela
2020-02-14  9:51     ` fengzhimin
2020-02-13  9:37 ` [PATCH RFC 04/14] migration/rdma: Export the RDMAContext struct Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 05/14] migration/rdma: Create the multifd channels for RDMA Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 06/14] migration/rdma: Transmit initial packet Zhimin Feng
2020-02-14 16:31   ` Dr. David Alan Gilbert
2020-02-13  9:37 ` [PATCH RFC 07/14] migration/rdma: Export the 'qemu_rdma_registration_handle' and 'qemu_rdma_exchange_send' functions Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 08/14] migration/rdma: Add the function for dynamic page registration Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 09/14] migration/rdma: register memory for multifd RDMA channels Zhimin Feng [this message]
2020-02-13  9:37 ` [PATCH RFC 10/14] migration/rdma: Wait for all multifd to complete registration Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 11/14] migration/rdma: use multifd to migrate VM for rdma-pin-all mode Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 12/14] migration/rdma: use multifd to migrate VM for NOT " Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 13/14] migration/rdma: only register the memory for multifd channels Zhimin Feng
2020-02-13  9:37 ` [PATCH RFC 14/14] migration/rdma: RDMA cleanup for multifd migration Zhimin Feng
2020-02-13 10:14 ` [PATCH RFC 00/14] *** multifd for RDMA v2 *** no-reply
2020-02-14 13:23   ` Dr. David Alan Gilbert
