From: Chuck Lever <chuck.lever@oracle.com>
To: bfields@fieldses.org
Cc: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: [PATCH v4 03/33] svcrdma: Clean up the tracing for rw_ctx_init errors
Date: Sat, 30 May 2020 09:28:20 -0400
Message-ID: <20200530132820.10117.48859.stgit@klimt.1015granger.net>
In-Reply-To: <20200530131711.10117.74063.stgit@klimt.1015granger.net>

- De-duplicate code (a brief sketch of the resulting call pattern follows below)
- Rename the tracepoint with an "_err" suffix so it can be enabled via glob
- Report the sg_cnt for the failing rw_ctx
- Fix a dumb signedness issue
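
For reference, both the Read- and Write-chunk builders now collapse to the
same call pattern. A minimal sketch of the Write-chunk call site, mirroring
the diff below (the helper records the tracepoint and releases the rw_ctxt
on failure, so the caller only has to map the result to -EIO):

	ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
				   DMA_TO_DEVICE);
	if (ret < 0)
		return -EIO;	/* failure already traced, ctxt already released */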

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/trace/events/rpcrdma.h    |   12 +++++---
 net/sunrpc/xprtrdma/svc_rdma_rw.c |   56 +++++++++++++++++++++++--------------
 2 files changed, 43 insertions(+), 25 deletions(-)

diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 132c3c778a43..f231975064cb 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1583,28 +1583,32 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
 DEFINE_SVC_DMA_EVENT(dma_map_page);
 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
 
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
 	TP_PROTO(
 		const struct svcxprt_rdma *rdma,
+		unsigned int nents,
 		int status
 	),
 
-	TP_ARGS(rdma, status),
+	TP_ARGS(rdma, nents, status),
 
 	TP_STRUCT__entry(
 		__field(int, status)
+		__field(unsigned int, nents)
 		__string(device, rdma->sc_cm_id->device->name)
 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
 	),
 
 	TP_fast_assign(
 		__entry->status = status;
+		__entry->nents = nents;
 		__assign_str(device, rdma->sc_cm_id->device->name);
 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
 	),
 
-	TP_printk("addr=%s device=%s status=%d",
-		__get_str(addr), __get_str(device), __entry->status
+	TP_printk("addr=%s device=%s nents=%u status=%d",
+		__get_str(addr), __get_str(device), __entry->nents,
+		__entry->status
 	)
 );
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 23c2d3ce0dc9..db70709e165a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -39,7 +39,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
 struct svc_rdma_rw_ctxt {
 	struct list_head	rw_list;
 	struct rdma_rw_ctx	rw_ctx;
-	int			rw_nents;
+	unsigned int		rw_nents;
 	struct sg_table		rw_sg_table;
 	struct scatterlist	rw_first_sgl[];
 };
@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
 	}
 }
 
+/**
+ * svc_rdma_rw_ctx_init - Prepare an R/W context for I/O
+ * @rdma: controlling transport instance
+ * @ctxt: R/W context to prepare
+ * @offset: RDMA offset
+ * @handle: RDMA tag/handle
+ * @direction: I/O direction
+ *
+ * Returns the number of WQEs that will be needed on the Send Queue,
+ * or a negative errno on failure.
+ */
+static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
+				struct svc_rdma_rw_ctxt *ctxt,
+				u64 offset, u32 handle,
+				enum dma_data_direction direction)
+{
+	int ret;
+
+	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
+			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+			       0, offset, handle, direction);
+	if (unlikely(ret < 0)) {
+		svc_rdma_put_rw_ctxt(rdma, ctxt);
+		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
+	}
+	return ret;
+}
+
 /* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
  * for all segments of one chunk.
@@ -431,12 +459,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
 			goto out_noctx;
 
 		constructor(info, write_len, ctxt);
-		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
-				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
-				       ctxt->rw_nents, 0, seg_offset,
-				       seg_handle, DMA_TO_DEVICE);
+		ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
+					   DMA_TO_DEVICE);
 		if (ret < 0)
-			goto out_initerr;
+			return -EIO;
 
 		trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
 
@@ -462,11 +488,6 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
 out_noctx:
 	dprintk("svcrdma: no R/W ctxs available\n");
 	return -ENOMEM;
-
-out_initerr:
-	svc_rdma_put_rw_ctxt(rdma, ctxt);
-	trace_svcrdma_dma_map_rwctx(rdma, ret);
-	return -EIO;
 }
 
 /* Send one of an xdr_buf's kvecs by itself. To send a Reply
@@ -646,12 +667,10 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
 			goto out_overrun;
 	}
 
-	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
-			       cc->cc_rdma->sc_port_num,
-			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
-			       0, offset, rkey, DMA_FROM_DEVICE);
+	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
+				   DMA_FROM_DEVICE);
 	if (ret < 0)
-		goto out_initerr;
+		return -EIO;
 
 	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 	cc->cc_sqecount += ret;
@@ -664,11 +683,6 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
 out_overrun:
 	dprintk("svcrdma: request overruns rq_pages\n");
 	return -EINVAL;
-
-out_initerr:
-	trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
-	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
-	return -EIO;
 }
 
 /* Walk the segments in the Read chunk starting at @p and construct
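
For illustration: with the added nents field, a DMA mapping failure reported
by the renamed tracepoint would look roughly like the line below (the peer
address, device name, and numeric values are hypothetical, shown only to
demonstrate the new format). Because the name now ends in "_err", this
tracepoint can be enabled together with the other svcrdma error tracepoints
by a single glob such as "svcrdma_*_err".

	svcrdma_dma_map_rw_err: addr=192.0.2.1:50123 device=mlx5_0 nents=7 status=-12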


Thread overview: 35+ messages
2020-05-30 13:28 [PATCH v4 00/33] Possible NFSD patches for v5.8 Chuck Lever
2020-05-30 13:28 ` [PATCH v4 01/33] SUNRPC: Split the xdr_buf event class Chuck Lever
2020-05-30 13:28 ` [PATCH v4 02/33] SUNRPC: Move xpt_mutex into socket xpo_sendto methods Chuck Lever
2020-05-30 13:28 ` Chuck Lever [this message]
2020-05-30 13:28 ` [PATCH v4 04/33] svcrdma: Clean up handling of get_rw_ctx errors Chuck Lever
2020-05-30 13:28 ` [PATCH v4 05/33] svcrdma: Trace page overruns when constructing RDMA Reads Chuck Lever
2020-05-30 13:28 ` [PATCH v4 06/33] svcrdma: trace undersized Write chunks Chuck Lever
2020-05-30 13:28 ` [PATCH v4 07/33] svcrdma: Fix backchannel return code Chuck Lever
2020-05-30 13:28 ` [PATCH v4 08/33] svcrdma: Remove backchannel dprintk call sites Chuck Lever
2020-05-30 13:28 ` [PATCH v4 09/33] svcrdma: Rename tracepoints that record header decoding errors Chuck Lever
2020-05-30 13:28 ` [PATCH v4 10/33] svcrdma: Remove the SVCRDMA_DEBUG macro Chuck Lever
2020-05-30 13:29 ` [PATCH v4 11/33] svcrdma: Displayed remote IP address should match stored address Chuck Lever
2020-05-30 13:29 ` [PATCH v4 12/33] svcrdma: Add tracepoints to report ->xpo_accept failures Chuck Lever
2020-05-30 13:29 ` [PATCH v4 13/33] SUNRPC: Remove kernel memory address from svc_xprt tracepoints Chuck Lever
2020-05-30 13:29 ` [PATCH v4 14/33] SUNRPC: Tracepoint to record errors in svc_xpo_create() Chuck Lever
2020-05-30 13:29 ` [PATCH v4 15/33] SUNRPC: Trace a few more generic svc_xprt events Chuck Lever
2020-05-30 13:29 ` [PATCH v4 16/33] SUNRPC: Remove "#include <trace/events/skb.h>" Chuck Lever
2020-05-30 13:29 ` [PATCH v4 17/33] SUNRPC: Add more svcsock tracepoints Chuck Lever
2020-05-30 13:29 ` [PATCH v4 18/33] SUNRPC: Replace dprintk call sites in TCP state change callouts Chuck Lever
2020-05-30 13:29 ` [PATCH v4 19/33] SUNRPC: Trace server-side rpcbind registration events Chuck Lever
2020-05-30 13:29 ` [PATCH v4 20/33] SUNRPC: Rename svc_sock::sk_reclen Chuck Lever
2020-05-30 13:29 ` [PATCH v4 21/33] SUNRPC: Restructure svc_tcp_recv_record() Chuck Lever
2020-05-30 13:30 ` [PATCH v4 22/33] SUNRPC: Replace dprintk() call sites in TCP receive path Chuck Lever
2020-05-30 13:30 ` [PATCH v4 23/33] SUNRPC: Refactor recvfrom path dealing with incomplete TCP receives Chuck Lever
2020-05-30 13:30 ` [PATCH v4 24/33] SUNRPC: Clean up svc_release_skb() functions Chuck Lever
2020-05-30 13:30 ` [PATCH v4 25/33] SUNRPC: Refactor svc_recvfrom() Chuck Lever
2020-05-30 13:30 ` [PATCH v4 26/33] SUNRPC: Restructure svc_udp_recvfrom() Chuck Lever
2020-05-30 13:30 ` [PATCH v4 27/33] SUNRPC: svc_show_status() macro should have enum definitions Chuck Lever
2020-05-30 13:30 ` [PATCH v4 28/33] NFSD: Add tracepoints to NFSD's duplicate reply cache Chuck Lever
2020-05-30 13:30 ` [PATCH v4 29/33] NFSD: Add tracepoints to the NFSD state management code Chuck Lever
2020-05-30 13:30 ` [PATCH v4 30/33] NFSD: Add tracepoints for monitoring NFSD callbacks Chuck Lever
2020-05-30 13:30 ` [PATCH v4 31/33] SUNRPC: Clean up request deferral tracepoints Chuck Lever
2020-05-30 13:30 ` [PATCH v4 32/33] NFSD: Squash an annoying compiler warning Chuck Lever
2020-05-30 13:30 ` [PATCH v4 33/33] NFSD: Fix improperly-formatted Doxygen comments Chuck Lever
2020-06-01 14:38 ` [PATCH v4 00/33] Possible NFSD patches for v5.8 J. Bruce Fields
