All of lore.kernel.org
 help / color / mirror / Atom feed
From: Chuck Lever <chuck.lever@oracle.com>
To: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: [PATCH v1 06/22] xprtrdma: Refactor chunktype handling
Date: Mon, 10 Sep 2018 11:09:27 -0400	[thread overview]
Message-ID: <20180910150927.10564.93847.stgit@manet.1015granger.net> (raw)
In-Reply-To: <20180910150040.10564.97487.stgit@manet.1015granger.net>

This is a prerequisite for adding support for sending both a Write
chunk and a Reply chunk in the same RPC. It is a refactoring/clean-up
patch, so no functional change is expected.

rpcrdma_convert_iovs is a helper function that is invoked by each
chunk encoder. It's passed an enum rpcrdma_chunktype variable that
indicates either the set of chunks needed for the Call arguments or
the Reply results.

While marshaling the same RPC, rpcrdma_convert_iovs is invoked once
for the Write chunk and once for the Reply chunk. But it's passed
the same @type both times. Thus it can't distinguish between when
it's called to encode the Write chunk versus when it's called to
encode the Reply chunk.

Let's refactor rpcrdma_convert_iovs so it is passed just the one
detail it needs about how the chunk should be handled. That detail
differs for Write chunks and Reply chunks.

Looking at this change I realized that "wtype" is unhelpfully named
in the entire marshaling code path, since it will include both the
Write chunk list and the Reply chunk. As part of refactoring
rpcrdma_convert_iovs, rename wtype to better fit its purpose. Update
"rtype" as well to match this convention.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/trace/events/rpcrdma.h  |   18 ++++---
 net/sunrpc/xprtrdma/rpc_rdma.c  |  100 +++++++++++++++++++++------------------
 net/sunrpc/xprtrdma/xprt_rdma.h |    2 -
 3 files changed, 63 insertions(+), 57 deletions(-)

diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 89e017b..b9e6802 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -459,11 +459,11 @@
 	TP_PROTO(
 		const struct rpc_rqst *rqst,
 		unsigned int hdrlen,
-		unsigned int rtype,
-		unsigned int wtype
+		unsigned int argtype,
+		unsigned int restype
 	),
 
-	TP_ARGS(rqst, hdrlen, rtype, wtype),
+	TP_ARGS(rqst, hdrlen, argtype, restype),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, task_id)
@@ -473,8 +473,8 @@
 		__field(unsigned int, headlen)
 		__field(unsigned int, pagelen)
 		__field(unsigned int, taillen)
-		__field(unsigned int, rtype)
-		__field(unsigned int, wtype)
+		__field(unsigned int, argtype)
+		__field(unsigned int, restype)
 	),
 
 	TP_fast_assign(
@@ -485,16 +485,16 @@
 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
 		__entry->pagelen = rqst->rq_snd_buf.page_len;
 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
-		__entry->rtype = rtype;
-		__entry->wtype = wtype;
+		__entry->argtype = argtype;
+		__entry->restype = restype;
 	),
 
 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
 		__entry->task_id, __entry->client_id, __entry->xid,
 		__entry->hdrlen,
 		__entry->headlen, __entry->pagelen, __entry->taillen,
-		xprtrdma_show_chunktype(__entry->rtype),
-		xprtrdma_show_chunktype(__entry->wtype)
+		xprtrdma_show_chunktype(__entry->argtype),
+		xprtrdma_show_chunktype(__entry->restype)
 	)
 );
 
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 39996df..26640e6 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -200,11 +200,10 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  *
  * Returns positive number of SGEs consumed, or a negative errno.
  */
-
 static int
 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
-		     unsigned int pos, enum rpcrdma_chunktype type,
-		     struct rpcrdma_mr_seg *seg)
+		     unsigned int pos, struct rpcrdma_mr_seg *seg,
+		     bool omit_xdr_pad)
 {
 	unsigned long page_base;
 	unsigned int len, n;
@@ -236,18 +235,10 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 		page_base = 0;
 	}
 
-	/* When encoding a Read chunk, the tail iovec contains an
-	 * XDR pad and may be omitted.
-	 */
-	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
-		goto out;
-
-	/* When encoding a Write chunk, some servers need to see an
-	 * extra segment for non-XDR-aligned Write chunks. The upper
-	 * layer provides space in the tail iovec that may be used
-	 * for this purpose.
+	/* A normal Read or Write chunk may omit the XDR pad.
+	 * The upper layer provides the pad in @xdrbuf's tail.
 	 */
-	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
+	if (omit_xdr_pad)
 		goto out;
 
 	if (xdrbuf->tail[0].iov_len)
@@ -338,23 +329,31 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  */
 static noinline int
 rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
+			 struct rpc_rqst *rqst, enum rpcrdma_chunktype argtype)
 {
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mr *mr;
+	bool omit_xdr_pad;
 	unsigned int pos;
 	int nsegs;
 
-	if (rtype == rpcrdma_noch)
+	switch (argtype) {
+	case rpcrdma_readch:
+		omit_xdr_pad = r_xprt->rx_ia.ri_implicit_roundup;
+		pos = rqst->rq_snd_buf.head[0].iov_len;
+		break;
+	case rpcrdma_areadch:
+		omit_xdr_pad = false;
+		pos = 0;
+		break;
+	default:
 		goto done;
+	}
 
-	pos = rqst->rq_snd_buf.head[0].iov_len;
-	if (rtype == rpcrdma_areadch)
-		pos = 0;
 	seg = req->rl_segments;
-	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
-				     rtype, seg);
+	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, seg,
+				     omit_xdr_pad);
 	if (nsegs < 0)
 		return nsegs;
 
@@ -394,7 +393,7 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  */
 static noinline int
 rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
+			  struct rpc_rqst *rqst, enum rpcrdma_chunktype restype)
 {
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
@@ -402,13 +401,18 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	int nsegs, nchunks;
 	__be32 *segcount;
 
-	if (wtype != rpcrdma_writech)
+	if (restype != rpcrdma_writech)
 		goto done;
 
+	/* When encoding a Write chunk, some servers need to see an
+	 * extra segment for non-XDR-aligned Write chunks. The upper
+	 * layer provides space in the tail iovec that may be used
+	 * for this purpose.
+	 */
 	seg = req->rl_segments;
 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
-				     rqst->rq_rcv_buf.head[0].iov_len,
-				     wtype, seg);
+				     rqst->rq_rcv_buf.head[0].iov_len, seg,
+				     r_xprt->rx_ia.ri_implicit_roundup);
 	if (nsegs < 0)
 		return nsegs;
 
@@ -457,8 +461,9 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  * @xdr is advanced to the next position in the stream.
  */
 static noinline int
-rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
+rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
+			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
+			   enum rpcrdma_chunktype restype)
 {
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
@@ -466,11 +471,11 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	int nsegs, nchunks;
 	__be32 *segcount;
 
-	if (wtype != rpcrdma_replych)
+	if (restype != rpcrdma_replych)
 		return encode_item_not_present(xdr);
 
 	seg = req->rl_segments;
-	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
+	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, seg, false);
 	if (nsegs < 0)
 		return nsegs;
 
@@ -563,7 +568,7 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  */
 static bool
 rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
-			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
+			 struct xdr_buf *xdr, enum rpcrdma_chunktype argtype)
 {
 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
 	unsigned int sge_no, page_base, len, remaining;
@@ -591,7 +596,7 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	 * well as additional content, and may not reside in the
 	 * same page as the head iovec.
 	 */
-	if (rtype == rpcrdma_readch) {
+	if (argtype == rpcrdma_readch) {
 		len = xdr->tail[0].iov_len;
 
 		/* Do not include the tail if it is only an XDR pad */
@@ -688,14 +693,14 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  * @req: context of RPC Call being marshalled
  * @hdrlen: size of transport header, in bytes
  * @xdr: xdr_buf containing RPC Call
- * @rtype: chunk type being encoded
+ * @argtype: chunk type being encoded
  *
  * Returns 0 on success; otherwise a negative errno is returned.
  */
 int
 rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 			  struct rpcrdma_req *req, u32 hdrlen,
-			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
+			  struct xdr_buf *xdr, enum rpcrdma_chunktype argtype)
 {
 	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
 	if (!req->rl_sendctx)
@@ -708,8 +713,9 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
 		return -EIO;
 
-	if (rtype != rpcrdma_areadch)
-		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
+	if (argtype != rpcrdma_areadch)
+		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req,
+					      xdr, argtype))
 			return -EIO;
 
 	return 0;
@@ -739,7 +745,7 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 {
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct xdr_stream *xdr = &req->rl_stream;
-	enum rpcrdma_chunktype rtype, wtype;
+	enum rpcrdma_chunktype argtype, restype;
 	bool ddp_allowed;
 	__be32 *p;
 	int ret;
@@ -774,11 +780,11 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	 * o Large non-read ops return as a single reply chunk.
 	 */
 	if (rpcrdma_results_inline(r_xprt, rqst))
-		wtype = rpcrdma_noch;
+		restype = rpcrdma_noch;
 	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		wtype = rpcrdma_writech;
+		restype = rpcrdma_writech;
 	else
-		wtype = rpcrdma_replych;
+		restype = rpcrdma_replych;
 
 	/*
 	 * Chunks needed for arguments?
@@ -796,14 +802,14 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	 */
 	if (rpcrdma_args_inline(r_xprt, rqst)) {
 		*p++ = rdma_msg;
-		rtype = rpcrdma_noch;
+		argtype = rpcrdma_noch;
 	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 		*p++ = rdma_msg;
-		rtype = rpcrdma_readch;
+		argtype = rpcrdma_readch;
 	} else {
 		r_xprt->rx_stats.nomsg_call_count++;
 		*p++ = rdma_nomsg;
-		rtype = rpcrdma_areadch;
+		argtype = rpcrdma_areadch;
 	}
 
 	/* If this is a retransmit, discard previously registered
@@ -839,19 +845,19 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	 * send a Call message with a Position Zero Read chunk and a
 	 * regular Read chunk at the same time.
 	 */
-	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
+	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, argtype);
 	if (ret)
 		goto out_err;
-	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
+	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, restype);
 	if (ret)
 		goto out_err;
-	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
+	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, restype);
 	if (ret)
 		goto out_err;
-	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);
+	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), argtype, restype);
 
 	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
-					&rqst->rq_snd_buf, rtype);
+					&rqst->rq_snd_buf, argtype);
 	if (ret)
 		goto out_err;
 	return 0;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index eae2166..d29bf38 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -633,7 +633,7 @@ enum rpcrdma_chunktype {
 int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 			      struct rpcrdma_req *req, u32 hdrlen,
 			      struct xdr_buf *xdr,
-			      enum rpcrdma_chunktype rtype);
+			      enum rpcrdma_chunktype argtype);
 void rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);

  parent reply	other threads:[~2018-09-10 20:04 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-09-10 15:08 [PATCH v1 00/22] NFS/RDMA client patches for v4.20 Chuck Lever
2018-09-10 15:09 ` [PATCH v1 01/22] xprtrdma: Reset credit grant properly after a disconnect Chuck Lever
2018-09-10 15:09 ` [PATCH v1 02/22] xprtrdma: Create more MRs at a time Chuck Lever
2018-09-10 15:09 ` [PATCH v1 03/22] xprtrdma: Explicitly resetting MRs is no longer necessary Chuck Lever
2018-09-10 15:09 ` [PATCH v1 04/22] xprtrdma: Name MR trace events consistently Chuck Lever
2018-09-10 15:09 ` [PATCH v1 05/22] xprtrdma: Refactor chunk encoding Chuck Lever
2018-09-10 15:09 ` Chuck Lever [this message]
2018-09-10 15:09 ` [PATCH v1 07/22] xprtrdma: Support Write+Reply Replies Chuck Lever
2018-09-10 15:09 ` [PATCH v1 08/22] sunrpc: Fix connect metrics Chuck Lever
2018-09-12 18:41   ` Anna Schumaker
2018-09-10 15:09 ` [PATCH v1 09/22] sunrpc: Report connect_time in seconds Chuck Lever
2018-09-10 15:09 ` [PATCH v1 10/22] xprtrdma: Rename rpcrdma_conn_upcall Chuck Lever
2018-09-10 15:09 ` [PATCH v1 11/22] xprtrdma: Conventional variable names in rpcrdma_conn_upcall Chuck Lever
2018-09-10 15:09 ` [PATCH v1 12/22] xprtrdma: Eliminate "connstate" variable from rpcrdma_conn_upcall() Chuck Lever
2018-09-10 15:10 ` [PATCH v1 13/22] xprtrdma: Re-organize the switch() in rpcrdma_conn_upcall Chuck Lever
2018-09-10 15:10 ` [PATCH v1 14/22] xprtrdma: Simplify RPC wake-ups on connect Chuck Lever
2018-09-10 15:10 ` [PATCH v1 15/22] xprtrdma: Rename rpcrdma_qp_async_error_upcall Chuck Lever
2018-09-10 15:10 ` [PATCH v1 16/22] xprtrdma: Remove memory address of "ep" from an error message Chuck Lever
2018-09-10 15:10 ` [PATCH v1 17/22] svcrdma: Don't disable BH's in backchannel Chuck Lever
2018-09-10 15:10 ` [PATCH v1 18/22] xprtrdma: Move rb_flags initialization Chuck Lever
2018-09-10 15:10 ` [PATCH v1 19/22] xprtrdma: Report when there were zero posted Receives Chuck Lever
2018-09-10 15:10 ` [PATCH v1 20/22] xprtrdma: Add documenting comments Chuck Lever
2018-09-10 15:10 ` [PATCH v1 21/22] xprtrdma: Clean up xprt_rdma_disconnect_inject Chuck Lever
2018-09-10 15:10 ` [PATCH v1 22/22] xprtrdma: Squelch a sparse warning Chuck Lever

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180910150927.10564.93847.stgit@manet.1015granger.net \
    --to=chuck.lever@oracle.com \
    --cc=linux-nfs@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.