From: Chuck Lever <chuck.lever@oracle.com>
To: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: [PATCH v1 3/5] svcrdma: Convert rdma_stat_sq_starve to a per-CPU counter
Date: Thu, 14 Jan 2021 11:22:46 -0500	[thread overview]
Message-ID: <161064136651.6061.4240701868650517710.stgit@klimt.1015granger.net> (raw)
In-Reply-To: <161064114388.6061.6808790429789225779.stgit@klimt.1015granger.net>

Avoid the overhead of a memory bus lock cycle for counting a value
that is hardly ever read.
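
For illustration only, not part of the patch: on the hot path, atomic_inc() is a
locked read-modify-write on a single shared counter, while percpu_counter_inc()
normally touches only a CPU-local variable and folds into the shared count in
batches. A minimal sketch of the two idioms using the <linux/percpu_counter.h>
API (the demo_* names are hypothetical):

    #include <linux/atomic.h>
    #include <linux/percpu_counter.h>

    /* Old style: every increment is a bus-locked RMW on one shared word. */
    static atomic_t demo_stat_atomic;

    static void demo_count_atomic(void)
    {
            atomic_inc(&demo_stat_atomic);
    }

    /* New style: increments stay CPU-local; summing is the rare slow path. */
    static struct percpu_counter demo_stat_percpu;

    static int demo_stat_init(void)
    {
            return percpu_counter_init(&demo_stat_percpu, 0, GFP_KERNEL);
    }

    static void demo_count_percpu(void)
    {
            percpu_counter_inc(&demo_stat_percpu); /* no bus lock in the common case */
    }

    static void demo_stat_exit(void)
    {
            percpu_counter_destroy(&demo_stat_percpu);
    }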

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/linux/sunrpc/svc_rdma.h       |    2 +-
 net/sunrpc/xprtrdma/svc_rdma.c        |   13 +++++++++----
 net/sunrpc/xprtrdma/svc_rdma_rw.c     |    1 +
 net/sunrpc/xprtrdma/svc_rdma_sendto.c |    2 +-
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index ff32c59a27e7..c06b16ccf83e 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -69,7 +69,7 @@ extern unsigned int svcrdma_max_req_size;
 extern struct percpu_counter svcrdma_stat_recv;
 extern atomic_t rdma_stat_read;
 extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
+extern struct percpu_counter svcrdma_stat_sq_starve;
 extern atomic_t rdma_stat_rq_starve;
 extern atomic_t rdma_stat_rq_poll;
 extern atomic_t rdma_stat_rq_prod;
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 3e5e622bad81..ee768d4c3c90 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -66,7 +66,7 @@ static unsigned int max_max_inline = RPCRDMA_MAX_INLINE_THRESH;
 struct percpu_counter svcrdma_stat_recv;
 atomic_t rdma_stat_read;
 atomic_t rdma_stat_write;
-atomic_t rdma_stat_sq_starve;
+struct percpu_counter svcrdma_stat_sq_starve;
 atomic_t rdma_stat_rq_starve;
 atomic_t rdma_stat_rq_poll;
 atomic_t rdma_stat_rq_prod;
@@ -199,10 +199,10 @@ static struct ctl_table svcrdma_parm_table[] = {
 	},
 	{
 		.procname	= "rdma_stat_sq_starve",
-		.data		= &rdma_stat_sq_starve,
-		.maxlen		= sizeof(atomic_t),
+		.data		= &svcrdma_stat_sq_starve,
+		.maxlen		= SVCRDMA_COUNTER_BUFSIZ,
 		.mode		= 0644,
-		.proc_handler	= read_reset_stat,
+		.proc_handler	= svcrdma_counter_handler,
 	},
 	{
 		.procname	= "rdma_stat_rq_starve",
@@ -267,6 +267,7 @@ static void svc_rdma_proc_cleanup(void)
 	unregister_sysctl_table(svcrdma_table_header);
 	svcrdma_table_header = NULL;
 
+	percpu_counter_destroy(&svcrdma_stat_sq_starve);
 	percpu_counter_destroy(&svcrdma_stat_recv);
 }
 
@@ -278,6 +279,9 @@ static int svc_rdma_proc_init(void)
 		return 0;
 
 	rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL);
+	if (rc)
+		goto out_err;
+	rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL);
 	if (rc)
 		goto out_err;
 
@@ -285,6 +289,7 @@ static int svc_rdma_proc_init(void)
 	return 0;
 
 out_err:
+	percpu_counter_destroy(&svcrdma_stat_recv);
 	return rc;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 0b63e1321d74..d7d98b2df00b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -364,6 +364,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 			return 0;
 		}
 
+		percpu_counter_inc(&svcrdma_stat_sq_starve);
 		trace_svcrdma_sq_full(rdma);
 		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 		wait_event(rdma->sc_send_wait,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 68af79d4f04f..52c759a8543e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -317,7 +317,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 	/* If the SQ is full, wait until an SQ entry is available */
 	while (1) {
 		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
-			atomic_inc(&rdma_stat_sq_starve);
+			percpu_counter_inc(&svcrdma_stat_sq_starve);
 			trace_svcrdma_sq_full(rdma);
 			atomic_inc(&rdma->sc_sq_avail);
 			wait_event(rdma->sc_send_wait,
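
For context, a sketch of the read-out side (the real svcrdma_counter_handler and
SVCRDMA_COUNTER_BUFSIZ come from patch 1/5 of this series; the helper below is
hypothetical, not the actual handler):

    #include <linux/percpu_counter.h>

    /* Hypothetical helper: sum the per-CPU deltas when the sysctl is read,
     * and clear the counter when it is written, mirroring the reset-on-write
     * behaviour of the old read_reset_stat handler for atomics. */
    static s64 demo_counter_read(struct percpu_counter *stat, bool reset)
    {
            s64 value = percpu_counter_sum_positive(stat);

            if (reset)
                    percpu_counter_set(stat, 0);
            return value;
    }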




Thread overview: 6 messages
2021-01-14 16:22 [PATCH v1 0/5] Convert svcrdma stats to per-CPU counters Chuck Lever
2021-01-14 16:22 ` [PATCH v1 1/5] svcrdma: Refactor svc_rdma_init() and svc_rdma_clean_up() Chuck Lever
2021-01-14 16:22 ` [PATCH v1 2/5] svcrdma: Convert rdma_stat_recv to a per-CPU counter Chuck Lever
2021-01-14 16:22 ` [PATCH v1 3/5] svcrdma: Convert rdma_stat_sq_starve to a per-CPU counter Chuck Lever [this message]
2021-01-14 16:22 ` [PATCH v1 4/5] svcrdma: Restore read and write stats Chuck Lever
2021-01-14 16:22 ` [PATCH v1 5/5] svcrdma: Deprecate stat variables that are no longer used Chuck Lever
