From: Chuck Lever <chuck.lever@oracle.com>
To: trondmy@hammerspace.com
Cc: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: [PATCH v3 23/26] xprtrdma: Move cqe to struct rpcrdma_mr
Date: Mon, 19 Apr 2021 14:04:03 -0400
Message-ID: <161885544311.38598.17973108446796585522.stgit@manet.1015granger.net>
In-Reply-To: <161885481568.38598.16682844600209775665.stgit@manet.1015granger.net>

Clean up.

- Simplify variable initialization in the completion handlers.

- Move another field out of struct rpcrdma_frwr.

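Before this change, each completion handler recovered the MR in two
container_of() steps (cqe -> rpcrdma_frwr -> rpcrdma_mr); with the cqe
embedded directly in struct rpcrdma_mr, a single step suffices. For
illustration, a minimal userspace sketch of the container_of() idiom
involved; the toy types below (struct cqe, struct mr) are stand-ins,
not the kernel structures:

#include <stddef.h>
#include <stdio.h>

/* Same offsetof() trick the kernel's container_of() is built on */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe {
	void (*done)(struct cqe *cqe);
};

struct mr {
	int id;
	struct cqe mr_cqe;	/* embedded, like mr_cqe in rpcrdma_mr */
};

static void mr_done(struct cqe *cqe)
{
	/* One container_of() step recovers the enclosing mr */
	struct mr *mr = container_of(cqe, struct mr, mr_cqe);

	printf("completion for mr %d\n", mr->id);
}

int main(void)
{
	struct mr mr = { .id = 23, .mr_cqe = { .done = mr_done } };

	/* The completion path sees only the embedded cqe pointer */
	mr.mr_cqe.done(&mr.mr_cqe);
	return 0;
}
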
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/frwr_ops.c  |   35 +++++++++++++++--------------------
 net/sunrpc/xprtrdma/xprt_rdma.h |    2 +-
 2 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index d3c18c776bf9..2a886a28d82b 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -366,9 +366,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
@@ -405,9 +403,9 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		trace_xprtrdma_mr_fastreg(mr);
 		frwr = &mr->frwr;
 
-		frwr->fr_cqe.done = frwr_wc_fastreg;
+		mr->mr_cqe.done = frwr_wc_fastreg;
 		frwr->fr_regwr.wr.next = post_wr;
-		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
+		frwr->fr_regwr.wr.wr_cqe = &mr->mr_cqe;
 		frwr->fr_regwr.wr.num_sge = 0;
 		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
 		frwr->fr_regwr.wr.send_flags = 0;
@@ -463,9 +461,7 @@ static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
 static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
@@ -484,9 +480,8 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
+	struct rpcrdma_frwr *frwr = &mr->frwr;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
@@ -529,16 +524,17 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		r_xprt->rx_stats.local_inv_needed++;
 
 		frwr = &mr->frwr;
-		frwr->fr_cqe.done = frwr_wc_localinv;
 		last = &frwr->fr_invwr;
 		last->next = NULL;
-		last->wr_cqe = &frwr->fr_cqe;
+		last->wr_cqe = &mr->mr_cqe;
 		last->sg_list = NULL;
 		last->num_sge = 0;
 		last->opcode = IB_WR_LOCAL_INV;
 		last->send_flags = IB_SEND_SIGNALED;
 		last->ex.invalidate_rkey = mr->mr_handle;
 
+		last->wr_cqe->done = frwr_wc_localinv;
+
 		*prev = last;
 		prev = &last->next;
 	}
@@ -547,7 +543,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * last WR in the chain completes, all WRs in the chain
 	 * are complete.
 	 */
-	frwr->fr_cqe.done = frwr_wc_localinv_wake;
+	last->wr_cqe->done = frwr_wc_localinv_wake;
 	reinit_completion(&frwr->fr_linv_done);
 
 	/* Transport disconnect drains the receive CQ before it
@@ -579,9 +575,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 	struct rpcrdma_rep *rep;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
@@ -630,16 +624,17 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		r_xprt->rx_stats.local_inv_needed++;
 
 		frwr = &mr->frwr;
-		frwr->fr_cqe.done = frwr_wc_localinv;
 		last = &frwr->fr_invwr;
 		last->next = NULL;
-		last->wr_cqe = &frwr->fr_cqe;
+		last->wr_cqe = &mr->mr_cqe;
 		last->sg_list = NULL;
 		last->num_sge = 0;
 		last->opcode = IB_WR_LOCAL_INV;
 		last->send_flags = IB_SEND_SIGNALED;
 		last->ex.invalidate_rkey = mr->mr_handle;
 
+		last->wr_cqe->done = frwr_wc_localinv;
+
 		*prev = last;
 		prev = &last->next;
 	}
@@ -649,7 +644,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * are complete. The last completion will wake up the
 	 * RPC waiter.
 	 */
-	frwr->fr_cqe.done = frwr_wc_localinv_done;
+	last->wr_cqe->done = frwr_wc_localinv_done;
 
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0cf073f0ee64..f72b69c3f0ea 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -231,7 +231,6 @@ struct rpcrdma_sendctx {
  */
 struct rpcrdma_frwr {
 	struct ib_mr			*fr_mr;
-	struct ib_cqe			fr_cqe;
 	struct completion		fr_linv_done;
 	union {
 		struct ib_reg_wr	fr_regwr;
@@ -247,6 +246,7 @@ struct rpcrdma_mr {
 	struct scatterlist	*mr_sg;
 	int			mr_nents;
 	enum dma_data_direction	mr_dir;
+	struct ib_cqe		mr_cqe;
 	struct rpcrdma_frwr	frwr;
 	struct rpcrdma_xprt	*mr_xprt;
 	u32			mr_handle;
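
One subtlety worth noting: frwr_unmap_sync() and frwr_unmap_async() set
frwr_wc_localinv on every LocalInv WR as it is chained, then override
only the final WR's done callback after the loop. Because send WRs on a
QP complete in order, the last completion implies the whole chain has
completed. A hedged sketch of that idiom, again with simplified
stand-in types rather than the kernel's ib_send_wr/ib_cqe API:

#include <stddef.h>

struct cqe {
	void (*done)(struct cqe *cqe);
};

struct wr {
	struct wr *next;
	struct cqe *wr_cqe;
};

static void wc_localinv(struct cqe *cqe)
{
	/* per-WR handler (stand-in for frwr_wc_localinv) */
}

static void wc_localinv_wake(struct cqe *cqe)
{
	/* final-WR handler that wakes the waiter
	 * (stand-in for frwr_wc_localinv_wake)
	 */
}

/* Build a chain from an array of WRs, mirroring the loop shape in
 * frwr_unmap_sync(): every WR gets the per-WR handler, then the
 * last WR's handler alone is overridden after the loop.
 */
static struct wr *chain_localinv(struct wr *wrs[], size_t count)
{
	struct wr *first = NULL, *last = NULL;
	struct wr **prev = &first;
	size_t i;

	for (i = 0; i < count; i++) {
		last = wrs[i];
		last->next = NULL;
		last->wr_cqe->done = wc_localinv;

		*prev = last;
		prev = &last->next;
	}

	/* Send WRs complete in order, so the last completion
	 * means every WR in the chain has completed.
	 */
	if (last)
		last->wr_cqe->done = wc_localinv_wake;
	return first;
}

Note that the patch switches the post-loop override from
frwr->fr_cqe.done to last->wr_cqe->done, which makes it explicit that
it is the final WR's completion being redirected.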



Thread overview: 34+ messages
2021-04-19 18:01 [PATCH v3 00/26] NFS/RDMA client patches for next Chuck Lever
2021-04-19 18:01 ` [PATCH v3 01/26] SUNRPC: Move fault injection call sites Chuck Lever
2021-04-19 18:05   ` Chuck Lever III
2021-04-19 18:01 ` [PATCH v3 02/26] SUNRPC: Remove trace_xprt_transmit_queued Chuck Lever
2021-04-19 18:01 ` [PATCH v3 03/26] SUNRPC: Add tracepoint that fires when an RPC is retransmitted Chuck Lever
2021-04-19 18:02 ` [PATCH v3 04/26] xprtrdma: Avoid Receive Queue wrapping Chuck Lever
2021-04-19 18:02 ` [PATCH v3 05/26] xprtrdma: Do not refresh Receive Queue while it is draining Chuck Lever
2021-04-19 18:02 ` [PATCH v3 06/26] xprtrdma: Put flushed Receives on free list instead of destroying them Chuck Lever
2021-04-19 18:02 ` [PATCH v3 07/26] xprtrdma: Improve locking around rpcrdma_rep destruction Chuck Lever
2021-04-23 21:06   ` Trond Myklebust
2021-04-24 17:39     ` Chuck Lever III
2021-04-24 17:59       ` Trond Myklebust
2021-04-19 18:02 ` [PATCH v3 08/26] xprtrdma: Improve commentary around rpcrdma_reps_unmap() Chuck Lever
2021-04-19 18:02 ` [PATCH v3 09/26] xprtrdma: Improve locking around rpcrdma_rep creation Chuck Lever
2021-04-19 18:02 ` [PATCH v3 10/26] xprtrdma: Fix cwnd update ordering Chuck Lever
2021-04-19 18:02 ` [PATCH v3 11/26] xprtrdma: Delete rpcrdma_recv_buffer_put() Chuck Lever
2021-04-19 18:02 ` [PATCH v3 12/26] xprtrdma: rpcrdma_mr_pop() already does list_del_init() Chuck Lever
2021-04-19 18:03 ` [PATCH v3 13/26] xprtrdma: Rename frwr_release_mr() Chuck Lever
2021-04-19 18:03 ` [PATCH v3 14/26] xprtrdma: Clarify use of barrier in frwr_wc_localinv_done() Chuck Lever
2021-04-19 18:03 ` [PATCH v3 15/26] xprtrdma: Do not recycle MR after FastReg/LocalInv flushes Chuck Lever
2021-04-25 14:19   ` Dan Aloni
2021-04-25 16:21     ` Chuck Lever III
2021-04-25 17:00       ` Dan Aloni
2021-04-19 18:03 ` [PATCH v3 16/26] xprtrdma: Do not wake RPC consumer on a failed LocalInv Chuck Lever
2021-04-19 18:03 ` [PATCH v3 17/26] xprtrdma: Avoid Send Queue wrapping Chuck Lever
2021-04-19 18:03 ` [PATCH v3 18/26] xprtrdma: Add tracepoints showing FastReg WRs and remote invalidation Chuck Lever
2021-04-19 18:03 ` [PATCH v3 19/26] xprtrdma: Add an rpcrdma_mr_completion_class Chuck Lever
2021-04-19 18:03 ` [PATCH v3 20/26] xprtrdma: Don't display r_xprt memory addresses in tracepoints Chuck Lever
2021-04-19 18:03 ` [PATCH v3 21/26] xprtrdma: Remove the RPC/RDMA QP event handler Chuck Lever
2021-04-19 18:03 ` [PATCH v3 22/26] xprtrdma: Move fr_cid to struct rpcrdma_mr Chuck Lever
2021-04-19 18:04 ` [PATCH v3 23/26] xprtrdma: Move cqe to struct rpcrdma_mr Chuck Lever [this message]
2021-04-19 18:04 ` [PATCH v3 24/26] xprtrdma: Move fr_linv_done field " Chuck Lever
2021-04-19 18:04 ` [PATCH v3 25/26] xprtrdma: Move the Work Request union " Chuck Lever
2021-04-19 18:04 ` [PATCH v3 26/26] xprtrdma: Move fr_mr field " Chuck Lever
