From: Keith Busch <kbusch@meta.com>
To: <linux-block@vger.kernel.org>, <axboe@kernel.dk>,
	<linux-nvme@lists.infradead.org>, <hch@lst.de>,
	<sagi@grimberg.me>
Cc: Keith Busch <kbusch@kernel.org>
Subject: [PATCH 2/3] nvme: add polling options for loop target
Date: Tue, 21 Mar 2023 17:23:49 -0700
Message-ID: <20230322002350.4038048-3-kbusch@meta.com>
In-Reply-To: <20230322002350.4038048-1-kbusch@meta.com>

From: Keith Busch <kbusch@kernel.org>

Add write and poll queue support to the nvme loop target so the fabrics
polling paths can be exercised without real hardware. Requests on a poll
queue that complete from interrupt context are deferred to a work item,
which the poll handler flushes to reap the completion. This is mostly
for testing purposes.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
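For reference, the new options can be exercised with an nvme-cli connect
along these lines (a sketch only: "testnqn" is a hypothetical subsystem
NQN, and the flags assume an nvme-cli recent enough to expose the
write/poll queue counts):

    nvme connect -t loop -n testnqn --nr-write-queues=2 --nr-poll-queues=2

Polled completions can then be driven by any polling consumer, e.g. fio
with the io_uring engine and hipri=1.
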
 drivers/nvme/target/loop.c | 63 +++++++++++++++++++++++++++++++++++---
 1 file changed, 58 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f2d24b2d992f8..0587ead60b09e 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -22,6 +22,7 @@ struct nvme_loop_iod {
 	struct nvmet_req	req;
 	struct nvme_loop_queue	*queue;
 	struct work_struct	work;
+	struct work_struct	poll;
 	struct sg_table		sg_table;
 	struct scatterlist	first_sgl[];
 };
@@ -37,6 +38,7 @@ struct nvme_loop_ctrl {
 	struct nvme_ctrl	ctrl;
 
 	struct nvmet_port	*port;
+	u32			io_queues[HCTX_MAX_TYPES];
 };
 
 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -76,7 +78,11 @@ static void nvme_loop_complete_rq(struct request *req)
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
-	nvme_complete_rq(req);
+
+	if (req->mq_hctx->type != HCTX_TYPE_POLL || !in_interrupt())
+		nvme_complete_rq(req);
+	else
+		queue_work(nvmet_wq, &iod->poll);
 }
 
 static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
@@ -120,6 +126,15 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	}
 }
 
+static void nvme_loop_poll_work(struct work_struct *work)
+{
+	struct nvme_loop_iod *iod =
+		container_of(work, struct nvme_loop_iod, poll);
+	struct request *req = blk_mq_rq_from_pdu(iod);
+
+	nvme_complete_rq(req);
+}
+
 static void nvme_loop_execute_work(struct work_struct *work)
 {
 	struct nvme_loop_iod *iod =
@@ -170,6 +185,30 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
+static bool nvme_loop_poll_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct nvme_loop_iod *iod;
+	struct request *rq;
+
+	rq = blk_mq_tag_to_rq(hctx->tags, bitnr);
+	if (!rq)
+		return true;
+
+	iod = blk_mq_rq_to_pdu(rq);
+	flush_work(&iod->poll);
+	return true;
+}
+
+static int nvme_loop_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+	struct blk_mq_tags *tags = hctx->tags;
+	struct sbitmap_queue *btags = &tags->bitmap_tags;
+
+	sbitmap_for_each_set(&btags->sb, nvme_loop_poll_iter, hctx);
+	return 1;
+}
+
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
@@ -197,6 +236,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 	iod->req.cqe = &iod->cqe;
 	iod->queue = &ctrl->queues[queue_idx];
 	INIT_WORK(&iod->work, nvme_loop_execute_work);
+	INIT_WORK(&iod->poll, nvme_loop_poll_work);
 	return 0;
 }
 
@@ -247,11 +287,20 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
+static void nvme_loop_map_queues(struct blk_mq_tag_set *set)
+{
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
+
+	nvme_map_queues(set, &ctrl->ctrl, NULL, ctrl->io_queues);
+}
+
 static const struct blk_mq_ops nvme_loop_mq_ops = {
 	.queue_rq	= nvme_loop_queue_rq,
 	.complete	= nvme_loop_complete_rq,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_hctx,
+	.map_queues	= nvme_loop_map_queues,
+	.poll		= nvme_loop_poll,
 };
 
 static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
@@ -305,7 +354,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
 	unsigned int nr_io_queues;
 	int ret, i;
 
-	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	nr_io_queues = nvme_nr_io_queues(opts);
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret || !nr_io_queues)
 		return ret;
@@ -321,6 +370,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
 		ctrl->ctrl.queue_count++;
 	}
 
+	nvme_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
 	return 0;
 
 out_destroy_queues:
@@ -494,7 +544,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 		return ret;
 
 	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-			&nvme_loop_mq_ops, 1,
+			&nvme_loop_mq_ops, ctrl->ctrl.opts->nr_poll_queues ? 3 : 2,
 			sizeof(struct nvme_loop_iod) +
 			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
 	if (ret)
@@ -534,6 +584,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
 	struct nvme_loop_ctrl *ctrl;
+	unsigned int nr_io_queues;
 	int ret;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -559,7 +610,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	ctrl->ctrl.kato = opts->kato;
 	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
 
-	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
+	nr_io_queues = nvme_nr_io_queues(ctrl->ctrl.opts);
+	ctrl->queues = kcalloc(nr_io_queues + 1, sizeof(*ctrl->queues),
 			GFP_KERNEL);
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
@@ -648,7 +700,8 @@ static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
 	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
-	.allowed_opts	= NVMF_OPT_TRADDR,
+	.allowed_opts	= NVMF_OPT_TRADDR | NVMF_OPT_NR_WRITE_QUEUES |
+			  NVMF_OPT_NR_POLL_QUEUES,
 };
 
 static int __init nvme_loop_init_module(void)
-- 
2.34.1


