All of lore.kernel.org
 help / color / mirror / Atom feed
From: Chuck Lever <chuck.lever@oracle.com>
To: bfields@fieldses.org
Cc: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: [PATCH v1 07/19] svcrdma: Remove sc_rq_depth
Date: Mon, 07 May 2018 15:27:27 -0400	[thread overview]
Message-ID: <20180507192726.4608.25691.stgit@klimt.1015granger.net> (raw)
In-Reply-To: <20180507192126.4608.63295.stgit@klimt.1015granger.net>

Clean up: No need to retain sc_rq_depth in struct svcxprt_rdma; it is
used only in svc_rdma_accept().

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/linux/sunrpc/svc_rdma.h          |    1 -
 net/sunrpc/xprtrdma/svc_rdma_transport.c |   17 ++++++++---------
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 37f759d..3cb6631 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -101,7 +101,6 @@ struct svcxprt_rdma {
 
 	atomic_t             sc_sq_avail;	/* SQEs ready to be consumed */
 	unsigned int	     sc_sq_depth;	/* Depth of SQ */
-	unsigned int	     sc_rq_depth;	/* Depth of RQ */
 	__be32		     sc_fc_credits;	/* Forward credits */
 	u32		     sc_max_requests;	/* Max requests */
 	u32		     sc_max_bc_requests;/* Backward credits */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index afd5e61..20abd3a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -589,9 +589,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct rdma_conn_param conn_param;
 	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
+	unsigned int ctxts, rq_depth;
 	struct ib_device *dev;
 	struct sockaddr *sap;
-	unsigned int ctxts;
 	int ret = 0;
 
 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
@@ -622,19 +622,18 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
-	newxprt->sc_rq_depth = newxprt->sc_max_requests +
-			       newxprt->sc_max_bc_requests;
-	if (newxprt->sc_rq_depth > dev->attrs.max_qp_wr) {
+	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
+	if (rq_depth > dev->attrs.max_qp_wr) {
 		pr_warn("svcrdma: reducing receive depth to %d\n",
 			dev->attrs.max_qp_wr);
-		newxprt->sc_rq_depth = dev->attrs.max_qp_wr;
-		newxprt->sc_max_requests = newxprt->sc_rq_depth - 2;
+		rq_depth = dev->attrs.max_qp_wr;
+		newxprt->sc_max_requests = rq_depth - 2;
 		newxprt->sc_max_bc_requests = 2;
 	}
 	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
 	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
 	ctxts *= newxprt->sc_max_requests;
-	newxprt->sc_sq_depth = newxprt->sc_rq_depth + ctxts;
+	newxprt->sc_sq_depth = rq_depth + ctxts;
 	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
 		pr_warn("svcrdma: reducing send depth to %d\n",
 			dev->attrs.max_qp_wr);
@@ -656,7 +655,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
-	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
+	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
 					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq)) {
 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
@@ -669,7 +668,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.port_num = newxprt->sc_port_num;
 	qp_attr.cap.max_rdma_ctxs = ctxts;
 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
-	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
+	qp_attr.cap.max_recv_wr = rq_depth;
 	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
 	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;


  parent reply	other threads:[~2018-05-07 19:27 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-05-07 19:26 [PATCH v1 00/19] NFS/RDMA server for-next Chuck Lever
2018-05-07 19:26 ` [PATCH v1 01/19] svcrdma: Add proper SPDX tags for NetApp-contributed source Chuck Lever
2018-05-09 20:23   ` J. Bruce Fields
2018-05-09 20:42     ` Chuck Lever
2018-05-15 14:52       ` Doug Ledford
2018-05-07 19:27 ` [PATCH v1 02/19] svcrdma: Use passed-in net namespace when creating RDMA listener Chuck Lever
2018-05-07 19:27 ` [PATCH v1 03/19] xprtrdma: Prepare RPC/RDMA includes for server-side trace points Chuck Lever
2018-05-07 19:27 ` [PATCH v1 04/19] svcrdma: Trace key RPC/RDMA protocol events Chuck Lever
2018-05-07 19:27 ` [PATCH v1 05/19] svcrdma: Trace key RDMA API events Chuck Lever
2018-05-07 19:27 ` [PATCH v1 06/19] svcrdma: Introduce svc_rdma_recv_ctxt Chuck Lever
2018-05-09 20:48   ` J. Bruce Fields
2018-05-09 21:02     ` Chuck Lever
2018-05-07 19:27 ` Chuck Lever [this message]
2018-05-07 19:27 ` [PATCH v1 08/19] svcrdma: Simplify svc_rdma_recv_ctxt_put Chuck Lever
2018-05-07 19:27 ` [PATCH v1 09/19] svcrdma: Preserve Receive buffer until svc_rdma_sendto Chuck Lever
2018-05-09 21:03   ` J. Bruce Fields
2018-05-07 19:27 ` [PATCH v1 10/19] svcrdma: Persistently allocate and DMA-map Receive buffers Chuck Lever
2018-05-09 21:18   ` J. Bruce Fields
2018-05-09 21:31     ` Chuck Lever
2018-05-09 21:37       ` Bruce Fields
2018-05-07 19:27 ` [PATCH v1 11/19] svcrdma: Allocate recv_ctxt's on CPU handling Receives Chuck Lever
2018-05-07 19:27 ` [PATCH v1 12/19] svcrdma: Refactor svc_rdma_dma_map_buf Chuck Lever
2018-05-07 19:27 ` [PATCH v1 13/19] svcrdma: Clean up Send SGE accounting Chuck Lever
2018-05-07 19:28 ` [PATCH v1 14/19] svcrdma: Introduce svc_rdma_send_ctxt Chuck Lever
2018-05-07 19:28 ` [PATCH v1 15/19] svcrdma: Don't overrun the SGE array in svc_rdma_send_ctxt Chuck Lever
2018-05-07 19:28 ` [PATCH v1 16/19] svcrdma: Remove post_send_wr Chuck Lever
2018-05-07 19:28 ` [PATCH v1 17/19] svcrdma: Simplify svc_rdma_send() Chuck Lever
2018-05-07 19:28 ` [PATCH v1 18/19] svcrdma: Persistently allocate and DMA-map Send buffers Chuck Lever
2018-05-07 19:28 ` [PATCH v1 19/19] svcrdma: Remove unused svc_rdma_op_ctxt Chuck Lever

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180507192726.4608.25691.stgit@klimt.1015granger.net \
    --to=chuck.lever@oracle.com \
    --cc=bfields@fieldses.org \
    --cc=linux-nfs@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see the mirroring
instructions on how to clone and mirror all data and code used by this
external index.