From: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
To: sagi@grimberg.me, hch@lst.de
Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>,
	linux-nvme@lists.infradead.org
Subject: [RFC PATCH 2/2] nvmet: add file-ns polling support
Date: Mon,  9 Dec 2019 22:25:57 -0800
Message-ID: <20191210062557.5171-3-chaitanya.kulkarni@wdc.com>
In-Reply-To: <20191210062557.5171-1-chaitanya.kulkarni@wdc.com>

This patch adds polling support for file-backed namespaces (file-ns).
Polling is only enabled when the underlying filesystem implements
->iopoll and the namespace's use_poll attribute is set. By default we
do not poll on any operation; we only poll read/write requests that
return -EIOCBQUEUED on submission, i.e. direct I/O.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 drivers/nvme/target/core.c        |  4 +++
 drivers/nvme/target/io-cmd-file.c | 60 +++++++++++++++++++++++++++----
 drivers/nvme/target/nvmet.h       |  9 +++--
 3 files changed, 63 insertions(+), 10 deletions(-)
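
Note below the fold (not part of the commit message): the intended flow,
condensed into a short illustrative sketch. This only summarizes the hunks
below; nvmet_ns_start_poll(), nvmet_req_prep_poll() and
nvmet_req_issue_poll() come from patch 1/2 of this series.

	/* ns enable: poll only if the fs implements ->iopoll and use_poll is set */
	ns->poll = ns->use_poll && ns->file->f_op->iopoll;
	if (ns->poll) {
		ret = nvmet_ns_start_poll(ns);
		if (ret)
			goto err;
	}

	/* submission: direct I/O that got queued is handed off for polling */
	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
	if (ret == -EIOCBQUEUED && req->ns->poll) {
		req->poll = true;
		nvmet_req_prep_poll(req);
		/* the poll thread then calls ->iopoll() until the iocb completes */
		nvmet_req_issue_poll(req);
	}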

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index d8f9130d1cd1..cd2f5c6f896e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -514,12 +514,16 @@ inline void nvmet_req_done(struct nvmet_req *req)
 {
 	if (req->ns->bdev)
 		nvmet_bdev_req_complete(req);
+	if (req->ns->file)
+		nvmet_file_req_complete(req);
 }
 
 inline void nvmet_req_poll_complete(struct nvmet_req *req)
 {
 	if (req->ns->bdev)
 		nvmet_bdev_poll_complete(req);
+	if (req->ns->file)
+		nvmet_file_poll_complete(req);
 }
 
 int nvmet_ns_enable(struct nvmet_ns *ns)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce06605..1f49a02fd437 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -16,6 +16,7 @@
 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
 	if (ns->file) {
+		ns->poll ? nvmet_ns_stop_poll(ns) : 0;
 		if (ns->buffered_io)
 			flush_workqueue(buffered_io_wq);
 		mempool_destroy(ns->bvec_pool);
@@ -72,6 +73,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 		goto err;
 	}
 
+	ns->poll = ns->use_poll && ns->file->f_op->iopoll;
+	ret = ns->poll ? nvmet_ns_start_poll(ns) : 0;
+	if (ret)
+		goto err;
+
 	return ret;
 err:
 	ns->size = 0;
@@ -114,9 +120,8 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 	return call_iter(iocb, &iter);
 }
 
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+void nvmet_file_req_complete(struct nvmet_req *req)
 {
-	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
 	u16 status = NVME_SC_SUCCESS;
 
 	if (req->f.bvec != req->inline_bvec) {
@@ -126,13 +131,39 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 			mempool_free(req->f.bvec, req->ns->bvec_pool);
 	}
 
-	if (unlikely(ret != req->transfer_len))
-		status = errno_to_nvme_status(req, ret);
+	if (unlikely(req->f.iosize != req->transfer_len))
+		status = errno_to_nvme_status(req, req->f.iosize);
 	nvmet_req_complete(req, status);
 }
 
+static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+{
+	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+
+	req->f.iosize = ret;
+	req->poll ? complete(&req->wait) : nvmet_file_req_complete(req);
+}
+
+void nvmet_file_poll_complete(struct nvmet_req *req)
+{
+	while (!completion_done(&req->wait)) {
+		int ret = req->f.iocb.ki_filp->f_op->iopoll(&req->f.iocb, true);
+
+		if (ret < 0)
+			pr_err("tid %d poll error %d\n", req->t->id, ret);
+	}
+	/*
+	 * We are already outside the lock here; completing the polled request
+	 * at this point reduces lock contention and keeps the done list short,
+	 * which shortens the time list_lock is held. This lets
+	 * nvmet_file_execute_rw() make progress whenever we are scheduled out.
+	 */
+	nvmet_file_req_complete(req);
+}
+
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
+	struct kiocb *iocb = &req->f.iocb;
 	ssize_t nr_bvec = req->sg_cnt;
 	unsigned long bv_cnt = 0;
 	bool is_sync = false;
@@ -151,7 +182,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 		return true;
 	}
 
-	memset(&req->f.iocb, 0, sizeof(struct kiocb));
+	memset(iocb, 0, sizeof(struct kiocb));
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
 		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
 		len += req->f.bvec[bv_cnt].bv_len;
@@ -187,13 +218,20 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 	 * A NULL ki_complete ask for synchronous execution, which we want
 	 * for the IOCB_NOWAIT case.
 	 */
-	if (!(ki_flags & IOCB_NOWAIT))
-		req->f.iocb.ki_complete = nvmet_file_io_done;
+	if (!(ki_flags & IOCB_NOWAIT)) {
+		iocb->ki_complete = nvmet_file_io_done;
+		ki_flags |= req->ns->poll ? IOCB_HIPRI : 0;
+	}
 
 	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
 
 	switch (ret) {
 	case -EIOCBQUEUED:
+		if (req->ns->poll) {
+			req->poll = true;
+			nvmet_req_prep_poll(req);
+			nvmet_req_issue_poll(req);
+		}
 		return true;
 	case -EAGAIN:
 		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
@@ -211,6 +249,10 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 	}
 
 complete:
+	/*
+	 * If we get here, the I/O completed synchronously and ret holds the
+	 * number of bytes transferred.
+	 */
 	nvmet_file_io_done(&req->f.iocb, ret, 0);
 	return true;
 }
@@ -379,15 +421,19 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 	switch (cmd->common.opcode) {
 	case nvme_cmd_read:
 	case nvme_cmd_write:
+		req->poll = false;
 		req->execute = nvmet_file_execute_rw;
 		return 0;
 	case nvme_cmd_flush:
+		req->poll = false;
 		req->execute = nvmet_file_execute_flush;
 		return 0;
 	case nvme_cmd_dsm:
+		req->poll = false;
 		req->execute = nvmet_file_execute_dsm;
 		return 0;
 	case nvme_cmd_write_zeroes:
+		req->poll = false;
 		req->execute = nvmet_file_execute_write_zeroes;
 		return 0;
 	default:
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ef2919e23e0b..e7e0e0de705e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -317,9 +317,10 @@ struct nvmet_req {
 		} b;
 		struct {
 			bool			mpool_alloc;
-			struct kiocb            iocb;
-			struct bio_vec          *bvec;
-			struct work_struct      work;
+			struct kiocb		iocb;
+			struct bio_vec		*bvec;
+			struct work_struct	work;
+			long			iosize;
 		} f;
 	};
 	int			sg_cnt;
@@ -469,6 +470,8 @@ void nvmet_req_issue_poll(struct nvmet_req *req);
 void nvmet_req_poll_complete(struct nvmet_req *req);
 void nvmet_bdev_poll_complete(struct nvmet_req *req);
 void nvmet_bdev_req_complete(struct nvmet_req *req);
+void nvmet_file_poll_complete(struct nvmet_req *req);
+void nvmet_file_req_complete(struct nvmet_req *req);
 void nvmet_req_done(struct nvmet_req *req);
 
 #define NVMET_QUEUE_SIZE	1024
-- 
2.22.1


