From: Yuval Shaia <yuval.shaia@oracle.com>
To: dgilbert@redhat.com, yuval.shaia@oracle.com,
	marcel.apfelbaum@gmail.com, armbru@redhat.com,
	qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v4 2/9] hw/rdma: Introduce protected qlist
Date: Sun,  3 Mar 2019 22:33:38 +0200	[thread overview]
Message-ID: <20190303203345.2472-3-yuval.shaia@oracle.com> (raw)
In-Reply-To: <20190303203345.2472-1-yuval.shaia@oracle.com>

To make the code more readable, move the handling of the protected
qlist into rdma_utils.

Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
---
 hw/rdma/rdma_backend.c      | 20 +++++--------------
 hw/rdma/rdma_backend_defs.h |  8 ++------
 hw/rdma/rdma_utils.c        | 39 +++++++++++++++++++++++++++++++++++++
 hw/rdma/rdma_utils.h        |  9 +++++++++
 4 files changed, 55 insertions(+), 21 deletions(-)
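
Note for reviewers (not part of the patch): a minimal usage sketch of
the new helpers, matching the definitions added below; error handling
elided:

    RdmaProtectedQList list;
    int64_t id;

    rdma_protected_qlist_init(&list);

    /* Append is serialized by the embedded mutex */
    rdma_protected_qlist_append_int64(&list, 42);

    /* Pop returns the oldest value, or -ENOENT when the list is empty */
    id = rdma_protected_qlist_pop_int64(&list);   /* 42 */
    id = rdma_protected_qlist_pop_int64(&list);   /* -ENOENT */

    /* Destroy NULLs list->list, so a second call is a no-op */
    rdma_protected_qlist_destroy(&list);
    rdma_protected_qlist_destroy(&list);          /* no-op */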

diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 24bac00a20..0ed14751be 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -527,9 +527,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
     bctx->up_ctx = ctx;
     bctx->sge = *sge;
 
-    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
-    qlist_append_int(backend_dev->recv_mads_list.list, bctx_id);
-    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
+    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);
 
     return 0;
 }
@@ -913,23 +911,19 @@ static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
 static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                      RdmaCmMuxMsg *msg)
 {
-    QObject *o_ctx_id;
     unsigned long cqe_ctx_id;
     BackendCtx *bctx;
     char *mad;
 
     trace_mad_message("recv", msg->umad.mad, msg->umad_len);
 
-    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
-    o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list);
-    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
-    if (!o_ctx_id) {
+    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
+    if (cqe_ctx_id == -ENOENT) {
         rdma_warn_report("No more free MADs buffers, waiting for a while");
         sleep(THR_POLL_TO);
         return;
     }
 
-    cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id));
     bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
     if (unlikely(!bctx)) {
         rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
@@ -994,8 +988,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
         return -EIO;
     }
 
-    qemu_mutex_init(&backend_dev->recv_mads_list.lock);
-    backend_dev->recv_mads_list.list = qlist_new();
+    rdma_protected_qlist_init(&backend_dev->recv_mads_list);
 
     enable_rdmacm_mux_async(backend_dev);
 
@@ -1010,10 +1003,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
 {
     disable_rdmacm_mux_async(backend_dev);
     qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
-    if (backend_dev->recv_mads_list.list) {
-        qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list));
-        qemu_mutex_destroy(&backend_dev->recv_mads_list.lock);
-    }
+    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
 }
 
 int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h
index 15ae8b970e..a8c15b09ab 100644
--- a/hw/rdma/rdma_backend_defs.h
+++ b/hw/rdma/rdma_backend_defs.h
@@ -20,6 +20,7 @@
 #include "chardev/char-fe.h"
 #include <infiniband/verbs.h>
 #include "contrib/rdmacm-mux/rdmacm-mux.h"
+#include "rdma_utils.h"
 
 typedef struct RdmaDeviceResources RdmaDeviceResources;
 
@@ -30,11 +31,6 @@ typedef struct RdmaBackendThread {
     bool is_running; /* Set by the thread to report its status */
 } RdmaBackendThread;
 
-typedef struct RecvMadList {
-    QemuMutex lock;
-    QList *list;
-} RecvMadList;
-
 typedef struct RdmaCmMux {
     CharBackend *chr_be;
     int can_receive;
@@ -48,7 +44,7 @@ typedef struct RdmaBackendDev {
     struct ibv_context *context;
     struct ibv_comp_channel *channel;
     uint8_t port_num;
-    RecvMadList recv_mads_list;
+    RdmaProtectedQList recv_mads_list;
     RdmaCmMux rdmacm_mux;
 } RdmaBackendDev;
 
diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c
index b9f07fcda7..0a8abe572d 100644
--- a/hw/rdma/rdma_utils.c
+++ b/hw/rdma/rdma_utils.c
@@ -14,6 +14,8 @@
  */
 
 #include "qemu/osdep.h"
+#include "qapi/qmp/qlist.h"
+#include "qapi/qmp/qnum.h"
 #include "trace.h"
 #include "rdma_utils.h"
 
@@ -51,3 +53,40 @@ void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
         pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
     }
 }
+
+void rdma_protected_qlist_init(RdmaProtectedQList *list)
+{
+    qemu_mutex_init(&list->lock);
+    list->list = qlist_new();
+}
+
+void rdma_protected_qlist_destroy(RdmaProtectedQList *list)
+{
+    if (list->list) {
+        qlist_destroy_obj(QOBJECT(list->list));
+        qemu_mutex_destroy(&list->lock);
+        list->list = NULL;
+    }
+}
+
+void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value)
+{
+    qemu_mutex_lock(&list->lock);
+    qlist_append_int(list->list, value);
+    qemu_mutex_unlock(&list->lock);
+}
+
+int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list)
+{
+    QObject *obj;
+
+    qemu_mutex_lock(&list->lock);
+    obj = qlist_pop(list->list);
+    qemu_mutex_unlock(&list->lock);
+
+    if (!obj) {
+        return -ENOENT;
+    }
+
+    return qnum_get_uint(qobject_to(QNum, obj));
+}
diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h
index acd148837f..a8bf1d4fec 100644
--- a/hw/rdma/rdma_utils.h
+++ b/hw/rdma/rdma_utils.h
@@ -29,8 +29,17 @@
 #define rdma_info_report(fmt, ...) \
     info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
 
+typedef struct RdmaProtectedQList {
+    QemuMutex lock;
+    QList *list;
+} RdmaProtectedQList;
+
 void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
 void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
+void rdma_protected_qlist_init(RdmaProtectedQList *list);
+void rdma_protected_qlist_destroy(RdmaProtectedQList *list);
+void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value);
+int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list);
 
 static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr)
 {
-- 
2.17.2

