From: Chuck Lever <chuck.lever@oracle.com>
To: trondmy@hammerspace.com
Cc: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: [PATCH v3 15/26] xprtrdma: Do not recycle MR after FastReg/LocalInv flushes
Date: Mon, 19 Apr 2021 14:03:12 -0400	[thread overview]
Message-ID: <161885539285.38598.13978652738422395833.stgit@manet.1015granger.net> (raw)
In-Reply-To: <161885481568.38598.16682844600209775665.stgit@manet.1015granger.net>

It is better not to touch MRs involved in a flush or post error until
the Send and Receive Queues have been drained and the transport is
fully quiescent. Simply don't insert such MRs back onto the free
list. They remain on mr_all and will be released when the connection
is torn down.

I had thought that recycling would prevent hardware resources from
being tied up for a long time. However, since v5.7, a transport
disconnect destroys the QP and other hardware-owned resources. The
MRs get cleaned up nicely at that point.
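
For illustration, the teardown path then behaves roughly like the
simplified sketch below (not the exact upstream code; the function
name example_mrs_destroy is made up and field names are approximate):
every MR still on the transport-wide mr_all list is popped and
released once the connection is gone, and frwr_mr_release() now also
DMA-unmaps the MR before deregistering it.

	/* Simplified sketch only -- not part of this patch.
	 * After the QP is drained and destroyed, walk the list of
	 * all MRs and release each one; flushed MRs are still on
	 * this list because they were never returned to the free
	 * list.
	 */
	static void example_mrs_destroy(struct rpcrdma_buffer *buf)
	{
		struct rpcrdma_mr *mr;

		spin_lock(&buf->rb_lock);
		while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
						      struct rpcrdma_mr,
						      mr_all)) != NULL) {
			list_del(&mr->mr_all);
			spin_unlock(&buf->rb_lock);

			/* DMA-unmaps, then ib_dereg_mr()s and frees */
			frwr_mr_release(mr);

			spin_lock(&buf->rb_lock);
		}
		spin_unlock(&buf->rb_lock);
	}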

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/trace/events/rpcrdma.h |    1 -
 net/sunrpc/xprtrdma/frwr_ops.c |   69 ++++++++++------------------------------
 2 files changed, 17 insertions(+), 53 deletions(-)

diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index c838e7ac1c2d..e38e745d13b0 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1014,7 +1014,6 @@ DEFINE_MR_EVENT(localinv);
 DEFINE_MR_EVENT(map);
 
 DEFINE_ANON_MR_EVENT(unmap);
-DEFINE_ANON_MR_EVENT(recycle);
 
 TRACE_EVENT(xprtrdma_dma_maperr,
 	TP_PROTO(
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index af85cec0ce31..27087dc8ba3c 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -49,6 +49,16 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
+static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
+{
+	if (mr->mr_device) {
+		trace_xprtrdma_mr_unmap(mr);
+		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
+				mr->mr_dir);
+		mr->mr_device = NULL;
+	}
+}
+
 /**
  * frwr_mr_release - Destroy one MR
  * @mr: MR allocated by frwr_mr_init
@@ -58,6 +68,8 @@ void frwr_mr_release(struct rpcrdma_mr *mr)
 {
 	int rc;
 
+	frwr_mr_unmap(mr->mr_xprt, mr);
+
 	rc = ib_dereg_mr(mr->frwr.fr_mr);
 	if (rc)
 		trace_xprtrdma_frwr_dereg(mr, rc);
@@ -65,32 +77,6 @@ void frwr_mr_release(struct rpcrdma_mr *mr)
 	kfree(mr);
 }
 
-static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
-{
-	if (mr->mr_device) {
-		trace_xprtrdma_mr_unmap(mr);
-		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
-				mr->mr_dir);
-		mr->mr_device = NULL;
-	}
-}
-
-static void frwr_mr_recycle(struct rpcrdma_mr *mr)
-{
-	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
-	trace_xprtrdma_mr_recycle(mr);
-
-	frwr_mr_unmap(r_xprt, mr);
-
-	spin_lock(&r_xprt->rx_buf.rb_lock);
-	list_del(&mr->mr_all);
-	r_xprt->rx_stats.mrs_recycled++;
-	spin_unlock(&r_xprt->rx_buf.rb_lock);
-
-	frwr_mr_release(mr);
-}
-
 static void frwr_mr_put(struct rpcrdma_mr *mr)
 {
 	frwr_mr_unmap(mr->mr_xprt, mr);
@@ -365,6 +351,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
  * @cq: completion queue
  * @wc: WCE for a completed FastReg WR
  *
+ * Each flushed MR gets destroyed after the QP has drained.
  */
 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 {
@@ -374,7 +361,6 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);
-	/* The MR will get recycled when the associated req is retransmitted */
 
 	rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
@@ -448,9 +434,7 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 
 static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
 {
-	if (wc->status != IB_WC_SUCCESS)
-		frwr_mr_recycle(mr);
-	else
+	if (likely(wc->status == IB_WC_SUCCESS))
 		frwr_mr_put(mr);
 }
 
@@ -567,17 +551,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	if (!rc)
 		return;
 
-	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
-	 */
+	/* On error, the MRs get destroyed once the QP has drained. */
 	trace_xprtrdma_post_linv_err(req, rc);
-	while (bad_wr) {
-		frwr = container_of(bad_wr, struct rpcrdma_frwr,
-				    fr_invwr);
-		mr = container_of(frwr, struct rpcrdma_mr, frwr);
-		bad_wr = bad_wr->next;
-
-		frwr_mr_recycle(mr);
-	}
 }
 
 /**
@@ -621,7 +596,6 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
 	struct ib_send_wr *first, *last, **prev;
 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
-	const struct ib_send_wr *bad_wr;
 	struct rpcrdma_frwr *frwr;
 	struct rpcrdma_mr *mr;
 	int rc;
@@ -663,21 +637,12 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless re_id->qp is a valid pointer.
 	 */
-	bad_wr = NULL;
-	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
+	rc = ib_post_send(ep->re_id->qp, first, NULL);
 	if (!rc)
 		return;
 
-	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
-	 */
+	/* On error, the MRs get destroyed once the QP has drained. */
 	trace_xprtrdma_post_linv_err(req, rc);
-	while (bad_wr) {
-		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
-		mr = container_of(frwr, struct rpcrdma_mr, frwr);
-		bad_wr = bad_wr->next;
-
-		frwr_mr_recycle(mr);
-	}
 
 	/* The final LOCAL_INV WR in the chain is supposed to
 	 * do the wake. If it was never posted, the wake will



Thread overview: 34+ messages
2021-04-19 18:01 [PATCH v3 00/26] NFS/RDMA client patches for next Chuck Lever
2021-04-19 18:01 ` [PATCH v3 01/26] SUNRPC: Move fault injection call sites Chuck Lever
2021-04-19 18:05   ` Chuck Lever III
2021-04-19 18:01 ` [PATCH v3 02/26] SUNRPC: Remove trace_xprt_transmit_queued Chuck Lever
2021-04-19 18:01 ` [PATCH v3 03/26] SUNRPC: Add tracepoint that fires when an RPC is retransmitted Chuck Lever
2021-04-19 18:02 ` [PATCH v3 04/26] xprtrdma: Avoid Receive Queue wrapping Chuck Lever
2021-04-19 18:02 ` [PATCH v3 05/26] xprtrdma: Do not refresh Receive Queue while it is draining Chuck Lever
2021-04-19 18:02 ` [PATCH v3 06/26] xprtrdma: Put flushed Receives on free list instead of destroying them Chuck Lever
2021-04-19 18:02 ` [PATCH v3 07/26] xprtrdma: Improve locking around rpcrdma_rep destruction Chuck Lever
2021-04-23 21:06   ` Trond Myklebust
2021-04-24 17:39     ` Chuck Lever III
2021-04-24 17:59       ` Trond Myklebust
2021-04-19 18:02 ` [PATCH v3 08/26] xprtrdma: Improve commentary around rpcrdma_reps_unmap() Chuck Lever
2021-04-19 18:02 ` [PATCH v3 09/26] xprtrdma: Improve locking around rpcrdma_rep creation Chuck Lever
2021-04-19 18:02 ` [PATCH v3 10/26] xprtrdma: Fix cwnd update ordering Chuck Lever
2021-04-19 18:02 ` [PATCH v3 11/26] xprtrdma: Delete rpcrdma_recv_buffer_put() Chuck Lever
2021-04-19 18:02 ` [PATCH v3 12/26] xprtrdma: rpcrdma_mr_pop() already does list_del_init() Chuck Lever
2021-04-19 18:03 ` [PATCH v3 13/26] xprtrdma: Rename frwr_release_mr() Chuck Lever
2021-04-19 18:03 ` [PATCH v3 14/26] xprtrdma: Clarify use of barrier in frwr_wc_localinv_done() Chuck Lever
2021-04-19 18:03 ` Chuck Lever [this message]
2021-04-25 14:19   ` [PATCH v3 15/26] xprtrdma: Do not recycle MR after FastReg/LocalInv flushes Dan Aloni
2021-04-25 16:21     ` Chuck Lever III
2021-04-25 17:00       ` Dan Aloni
2021-04-19 18:03 ` [PATCH v3 16/26] xprtrdma: Do not wake RPC consumer on a failed LocalInv Chuck Lever
2021-04-19 18:03 ` [PATCH v3 17/26] xprtrdma: Avoid Send Queue wrapping Chuck Lever
2021-04-19 18:03 ` [PATCH v3 18/26] xprtrdma: Add tracepoints showing FastReg WRs and remote invalidation Chuck Lever
2021-04-19 18:03 ` [PATCH v3 19/26] xprtrdma: Add an rpcrdma_mr_completion_class Chuck Lever
2021-04-19 18:03 ` [PATCH v3 20/26] xprtrdma: Don't display r_xprt memory addresses in tracepoints Chuck Lever
2021-04-19 18:03 ` [PATCH v3 21/26] xprtrdma: Remove the RPC/RDMA QP event handler Chuck Lever
2021-04-19 18:03 ` [PATCH v3 22/26] xprtrdma: Move fr_cid to struct rpcrdma_mr Chuck Lever
2021-04-19 18:04 ` [PATCH v3 23/26] xprtrdma: Move cqe " Chuck Lever
2021-04-19 18:04 ` [PATCH v3 24/26] xprtrdma: Move fr_linv_done field " Chuck Lever
2021-04-19 18:04 ` [PATCH v3 25/26] xprtrdma: Move the Work Request union " Chuck Lever
2021-04-19 18:04 ` [PATCH v3 26/26] xprtrdma: Move fr_mr field " Chuck Lever
