From: Li Zhijian <lizhijian@cn.fujitsu.com>
To: <quintela@redhat.com>, <dgilbert@redhat.com>
Cc: qemu-devel@nongnu.org, Li Zhijian <lizhijian@cn.fujitsu.com>
Subject: [PATCH v2 2/2] migration/rdma: rename cq and comp_channel with recv prefix
Date: Fri, 18 Jun 2021 18:36:12 +0800	[thread overview]
Message-ID: <20210618103612.152817-2-lizhijian@cn.fujitsu.com> (raw)
In-Reply-To: <20210618103612.152817-1-lizhijian@cn.fujitsu.com>

Rename RDMAContext's cq and comp_channel fields to recv_cq and
recv_comp_channel so they are clearly distinguished from their
send_cq and send_comp_channel counterparts. This makes the code
clearer and introduces no functional change.
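
For context, both per-direction pairs are consumed with the usual
libibverbs completion-channel pattern: block on the channel, ack the
event, re-arm notification, then drain the CQ. A minimal sketch of that
pattern follows; drain_one_cq() is a hypothetical helper for
illustration only, not code from rdma.c (QEMU's actual logic lives in
qemu_rdma_wait_comp_channel() and qemu_rdma_poll()):

#include <infiniband/verbs.h>

/* Hypothetical helper: wait for and drain one CQ event on a channel.
 * Works the same whether ch is the recv or the send completion channel. */
static int drain_one_cq(struct ibv_comp_channel *ch)
{
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    struct ibv_wc wc;

    /* Sleeps until the CQ bound to this channel raises a completion event. */
    if (ibv_get_cq_event(ch, &ev_cq, &ev_ctx)) {
        return -1;
    }
    ibv_ack_cq_events(ev_cq, 1);

    /* Re-arm notification before polling so no completion is missed. */
    if (ibv_req_notify_cq(ev_cq, 0)) {
        return -1;
    }

    /* Drain everything currently queued; wc.wr_id identifies the request. */
    while (ibv_poll_cq(ev_cq, 1, &wc) > 0) {
        /* dispatch on wc.wr_id / wc.status here */
    }
    return 0;
}

Keeping one channel per direction also means each channel's fd can be
registered with aio_set_fd_handler() independently, as the
qio_channel_rdma_set_aio_fd_handler() hunk below does.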

Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
---
 migration/rdma.c | 50 ++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/migration/rdma.c b/migration/rdma.c
index 16fe0688858..527972d4970 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -358,10 +358,10 @@ typedef struct RDMAContext {
     struct ibv_context          *verbs;
     struct rdma_event_channel   *channel;
     struct ibv_qp *qp;                      /* queue pair */
-    struct ibv_comp_channel *comp_channel;  /* completion channel */
+    struct ibv_comp_channel *recv_comp_channel;  /* recv completion channel */
     struct ibv_comp_channel *send_comp_channel;  /* send completion channel */
     struct ibv_pd *pd;                      /* protection domain */
-    struct ibv_cq *cq;                      /* completion queue */
+    struct ibv_cq *recv_cq;                 /* recv completion queue */
     struct ibv_cq *send_cq;                 /* send completion queue */
 
     /*
@@ -1062,8 +1062,8 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
     }
 
     /* create completion channel */
-    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
-    if (!rdma->comp_channel) {
+    rdma->recv_comp_channel = ibv_create_comp_channel(rdma->verbs);
+    if (!rdma->recv_comp_channel) {
         error_report("failed to allocate completion channel");
         goto err_alloc_pd_cq;
     }
@@ -1071,9 +1071,9 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
     /*
      * Completion queue can be filled by read work requests.
      */
-    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
-            NULL, rdma->comp_channel, 0);
-    if (!rdma->cq) {
+    rdma->recv_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
+                                  NULL, rdma->recv_comp_channel, 0);
+    if (!rdma->recv_cq) {
         error_report("failed to allocate completion queue");
         goto err_alloc_pd_cq;
     }
@@ -1098,18 +1098,18 @@ err_alloc_pd_cq:
     if (rdma->pd) {
         ibv_dealloc_pd(rdma->pd);
     }
-    if (rdma->comp_channel) {
-        ibv_destroy_comp_channel(rdma->comp_channel);
+    if (rdma->recv_comp_channel) {
+        ibv_destroy_comp_channel(rdma->recv_comp_channel);
     }
     if (rdma->send_comp_channel) {
         ibv_destroy_comp_channel(rdma->send_comp_channel);
     }
-    if (rdma->cq) {
-        ibv_destroy_cq(rdma->cq);
-        rdma->cq = NULL;
+    if (rdma->recv_cq) {
+        ibv_destroy_cq(rdma->recv_cq);
+        rdma->recv_cq = NULL;
     }
     rdma->pd = NULL;
-    rdma->comp_channel = NULL;
+    rdma->recv_comp_channel = NULL;
     rdma->send_comp_channel = NULL;
     return -1;
 
@@ -1128,7 +1128,7 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma)
     attr.cap.max_send_sge = 1;
     attr.cap.max_recv_sge = 1;
     attr.send_cq = rdma->send_cq;
-    attr.recv_cq = rdma->cq;
+    attr.recv_cq = rdma->recv_cq;
     attr.qp_type = IBV_QPT_RC;
 
     ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
@@ -1606,12 +1606,12 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma,
 static struct ibv_comp_channel *to_channel(RDMAContext *rdma, int wrid)
 {
     return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_comp_channel :
-           rdma->comp_channel;
+           rdma->recv_comp_channel;
 }
 
 static struct ibv_cq *to_cq(RDMAContext *rdma, int wrid)
 {
-    return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_cq : rdma->cq;
+    return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_cq : rdma->recv_cq;
 }
 
 /*
@@ -2398,17 +2398,17 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
         rdma_destroy_qp(rdma->cm_id);
         rdma->qp = NULL;
     }
-    if (rdma->cq) {
-        ibv_destroy_cq(rdma->cq);
-        rdma->cq = NULL;
+    if (rdma->recv_cq) {
+        ibv_destroy_cq(rdma->recv_cq);
+        rdma->recv_cq = NULL;
     }
     if (rdma->send_cq) {
         ibv_destroy_cq(rdma->send_cq);
         rdma->send_cq = NULL;
     }
-    if (rdma->comp_channel) {
-        ibv_destroy_comp_channel(rdma->comp_channel);
-        rdma->comp_channel = NULL;
+    if (rdma->recv_comp_channel) {
+        ibv_destroy_comp_channel(rdma->recv_comp_channel);
+        rdma->recv_comp_channel = NULL;
     }
     if (rdma->send_comp_channel) {
         ibv_destroy_comp_channel(rdma->send_comp_channel);
@@ -3084,12 +3084,12 @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
 {
     QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
     if (io_read) {
-        aio_set_fd_handler(ctx, rioc->rdmain->comp_channel->fd,
+        aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd,
                            false, io_read, io_write, NULL, opaque);
         aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd,
                            false, io_read, io_write, NULL, opaque);
     } else {
-        aio_set_fd_handler(ctx, rioc->rdmaout->comp_channel->fd,
+        aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd,
                            false, io_read, io_write, NULL, opaque);
         aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd,
                            false, io_read, io_write, NULL, opaque);
@@ -3305,7 +3305,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
      */
     while (1) {
         uint64_t wr_id, wr_id_in;
-        int ret = qemu_rdma_poll(rdma, rdma->cq, &wr_id_in, NULL);
+        int ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
         if (ret < 0) {
             error_report("rdma migration: polling error! %d", ret);
             goto err;
-- 
2.31.1

Thread overview: 4+ messages
2021-06-18 10:36 [PATCH v2 1/2] migration/rdma: Fix out of order wrid Li Zhijian
2021-06-18 10:36 ` Li Zhijian [this message]
2021-06-24 16:42 ` Dr. David Alan Gilbert
2021-06-28  7:13   ` lizhijian
