* consolidate tagset / misc request_queue allocation
@ 2022-09-20 17:15 Christoph Hellwig
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
                   ` (13 more replies)
  0 siblings, 14 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:15 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Hi all,

this series consolidates the allocation and freeing of the tagsets
and misc (non-I/O) request queues across the various fabrics drivers.

Eventually the PCIe and Apple controller drivers should use these
helpers as well, but that will require some additional cleanups of
the request_queue lifetime first.
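
For a sense of what a conversion looks like, a converted fabrics driver
ends up with roughly the following setup code (a minimal sketch based on
the helpers added in patch 1; my_admin_mq_ops, my_mq_ops and struct
my_request are placeholders for the driver's own definitions):

	/* admin tag set plus admin_q (and fabrics_q for fabrics ctrls) */
	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&my_admin_mq_ops, BLK_MQ_F_NO_SCHED,
			sizeof(struct my_request));
	if (ret)
		return ret;

	/* I/O tag set plus the connect_q used to issue Connect commands */
	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&my_mq_ops, BLK_MQ_F_SHOULD_MERGE,
			sizeof(struct my_request));
	if (ret)
		nvme_remove_admin_tag_set(&ctrl->ctrl);

Teardown is the mirror image: nvme_remove_io_tag_set() followed by
nvme_remove_admin_tag_set(), each of which destroys the request queues
created alongside the tag set before freeing it.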

Diffstat:
 host/core.c   |  100 +++++++++++++++++++++++++++++++++++++++++
 host/fc.c     |  121 +++++++++++--------------------------------------
 host/nvme.h   |   16 +++---
 host/rdma.c   |  141 +++++++++++++++-------------------------------------------
 host/tcp.c    |  118 +++++++++---------------------------------------
 target/loop.c |   91 +++++++++----------------------------
 6 files changed, 220 insertions(+), 367 deletions(-)



* [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
@ 2022-09-20 17:15 ` Christoph Hellwig
  2022-09-20 21:42   ` Chaitanya Kulkarni
                     ` (2 more replies)
  2022-09-20 17:16 ` [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue Christoph Hellwig
                   ` (12 subsequent siblings)
  13 siblings, 3 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:15 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Add common helpers to allocate and tear down the admin and I/O tag sets,
including the special queues allocated with them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 100 +++++++++++++++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h |   8 ++++
 2 files changed, 108 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8c9c1176624da..f8d9f32adc87c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4800,6 +4800,106 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size;
+	set->driver_data = ctrl;
+	set->nr_hw_queues = 1;
+	set->timeout = NVME_ADMIN_TIMEOUT;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	ctrl->admin_q = blk_mq_init_queue(set);
+	if (IS_ERR(ctrl->admin_q)) {
+		ret = PTR_ERR(ctrl->admin_q);
+		goto out_free_tagset;
+	}
+
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->fabrics_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->fabrics_q)) {
+			ret = PTR_ERR(ctrl->fabrics_q);
+			goto out_cleanup_admin_q;
+		}
+	}
+
+	ctrl->admin_tagset = set;
+	return 0;
+
+out_cleanup_admin_q:
+	blk_mq_destroy_queue(ctrl->fabrics_q);
+out_free_tagset:
+	blk_mq_free_tag_set(set);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+	blk_mq_destroy_queue(ctrl->admin_q);
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->fabrics_q);
+	blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = ctrl->sqsize + 1;
+	set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size;
+	set->driver_data = ctrl;
+	set->nr_hw_queues = ctrl->queue_count - 1;
+	set->timeout = NVME_IO_TIMEOUT;
+	if (ops->map_queues)
+		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	ctrl->connect_q = blk_mq_init_queue(set);
+	if (IS_ERR(ctrl->connect_q)) {
+		ret = PTR_ERR(ctrl->connect_q);
+		goto out_free_tag_set;
+	}
+
+	ctrl->tagset = set;
+	return 0;
+
+out_free_tag_set:
+	blk_mq_free_tag_set(set);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->connect_q);
+	blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_mpath_stop(ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1bdf714dcd9e4..6dec8a3bef1aa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -722,6 +722,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
-- 
2.30.2




* [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:43   ` Chaitanya Kulkarni
  2022-09-21  9:24   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
                   ` (11 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

->queue_size is not used anywhere, so remove it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/tcp.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b5f22ceaae823..8a749ef63afee 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -134,7 +134,6 @@ struct nvme_tcp_queue {
 	/* send state */
 	struct nvme_tcp_request *request;
 
-	int			queue_size;
 	u32			maxh2cdata;
 	size_t			cmnd_capsule_len;
 	struct nvme_tcp_ctrl	*ctrl;
@@ -1479,8 +1478,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
 }
 
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
-		int qid, size_t queue_size)
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1492,7 +1490,6 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	INIT_LIST_HEAD(&queue->send_list);
 	mutex_init(&queue->send_mutex);
 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
-	queue->queue_size = queue_size;
 
 	if (qid > 0)
 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
@@ -1785,7 +1782,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
-	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+	ret = nvme_tcp_alloc_queue(ctrl, 0);
 	if (ret)
 		return ret;
 
@@ -1805,7 +1802,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
+		ret = nvme_tcp_alloc_queue(ctrl, i);
 		if (ret)
 			goto out_free_queues;
 	}
-- 
2.30.2




* [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
  2022-09-20 17:16 ` [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:43   ` Chaitanya Kulkarni
  2022-09-21  9:25   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers Christoph Hellwig
                   ` (10 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Point the private data to the generic controller structure in preparation
for using the common tagset init/exit code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
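For background: this works because struct nvme_ctrl is embedded in
struct nvme_tcp_ctrl, so the driver can always get back to its private
structure with container_of().  The driver's existing to_tcp_ctrl()
helper does exactly that; a sketch with the layout abbreviated:

	struct nvme_tcp_ctrl {
		/* TCP-specific state (queues, tag sets, ...) omitted */
		struct nvme_ctrl	ctrl;	/* embedded generic ctrl */
	};

	static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
	{
		return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
	}

With set->driver_data pointing at &ctrl->ctrl, the common tagset code
can use the generic pointer directly while the transport callbacks
recover their private structure as shown below.
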
 drivers/nvme/host/tcp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8a749ef63afee..863e985085d4d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -465,7 +465,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_tcp_cmd_pdu *pdu;
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
@@ -489,7 +489,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	hctx->driver_data = queue;
@@ -499,7 +499,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
 
 	hctx->driver_data = queue;
@@ -1700,7 +1700,7 @@ static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->flags = BLK_MQ_F_BLOCKING;
 	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = 1;
 	set->timeout = NVME_ADMIN_TIMEOUT;
 	ret = blk_mq_alloc_tag_set(set);
@@ -1722,7 +1722,7 @@ static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = nctrl->queue_count - 1;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
@@ -2486,7 +2486,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-- 
2.30.2




* [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (2 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:44   ` Chaitanya Kulkarni
  2022-09-21  9:26   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
                   ` (9 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Use the common helpers to allocate and free the tagsets.  To make this
work the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_tcp_ctrl.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/tcp.c | 101 +++++++---------------------------------
 1 file changed, 16 insertions(+), 85 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 863e985085d4d..3e7b29d07c713 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1687,51 +1687,6 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 	return ret;
 }
 
-static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_tcp_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_BLOCKING;
-	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		nctrl->admin_tagset = set;
-	return ret;
-}
-
-static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_tcp_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		nctrl->tagset = set;
-	return ret;
-}
-
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 {
 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
@@ -1890,10 +1845,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_io_queues(ctrl);
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->connect_q);
-		blk_mq_free_tag_set(ctrl->tagset);
-	}
+	if (remove)
+		nvme_remove_io_tag_set(ctrl);
 	nvme_tcp_free_io_queues(ctrl);
 }
 
@@ -1906,13 +1859,12 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 		return ret;
 
 	if (new) {
-		ret = nvme_tcp_alloc_tag_set(ctrl);
+		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+				&nvme_tcp_mq_ops,
+				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+				sizeof(struct nvme_tcp_request));
 		if (ret)
 			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(ctrl);
-		if (ret)
-			goto out_free_tag_set;
 	}
 
 	/*
@@ -1959,10 +1911,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 out_cleanup_connect_q:
 	nvme_cancel_tagset(ctrl);
 	if (new)
-		blk_mq_destroy_queue(ctrl->connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->tagset);
+		nvme_remove_io_tag_set(ctrl);
 out_free_io_queues:
 	nvme_tcp_free_io_queues(ctrl);
 	return ret;
@@ -1971,11 +1920,8 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_queue(ctrl, 0);
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->admin_q);
-		blk_mq_destroy_queue(ctrl->fabrics_q);
-		blk_mq_free_tag_set(ctrl->admin_tagset);
-	}
+	if (remove)
+		nvme_remove_admin_tag_set(ctrl);
 	nvme_tcp_free_admin_queue(ctrl);
 }
 
@@ -1988,26 +1934,17 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 		return error;
 
 	if (new) {
-		error = nvme_tcp_alloc_admin_tag_set(ctrl);
+		error = nvme_alloc_admin_tag_set(ctrl,
+				&to_tcp_ctrl(ctrl)->admin_tag_set,
+				&nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+				sizeof(struct nvme_tcp_request));
 		if (error)
 			goto out_free_queue;
-
-		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
-		if (IS_ERR(ctrl->fabrics_q)) {
-			error = PTR_ERR(ctrl->fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
-		if (IS_ERR(ctrl->admin_q)) {
-			error = PTR_ERR(ctrl->admin_q);
-			goto out_cleanup_fabrics_q;
-		}
 	}
 
 	error = nvme_tcp_start_queue(ctrl, 0);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	error = nvme_enable_ctrl(ctrl);
 	if (error)
@@ -2027,15 +1964,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 out_stop_queue:
 	nvme_tcp_stop_queue(ctrl, 0);
 	nvme_cancel_admin_tagset(ctrl);
-out_cleanup_queue:
-	if (new)
-		blk_mq_destroy_queue(ctrl->admin_q);
-out_cleanup_fabrics_q:
-	if (new)
-		blk_mq_destroy_queue(ctrl->fabrics_q);
-out_free_tagset:
+out_cleanup_tagset:
 	if (new)
-		blk_mq_free_tag_set(ctrl->admin_tagset);
+		nvme_remove_admin_tag_set(ctrl);
 out_free_queue:
 	nvme_tcp_free_admin_queue(ctrl);
 	return error;
-- 
2.30.2




* [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (3 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:44   ` Chaitanya Kulkarni
  2022-09-21  9:26   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers Christoph Hellwig
                   ` (8 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Point the private data to the generic controller structure in preparation
for using the common tagset init/exit code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/rdma.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4c6df34f9d7ac..8bc2930fd496e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -295,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -320,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -332,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
 
 	BUG_ON(hctx_idx != 0);
@@ -801,7 +801,7 @@ static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->cmd_size = sizeof(struct nvme_rdma_request) +
 			NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = 1;
 	set->timeout = NVME_ADMIN_TIMEOUT;
 	set->flags = BLK_MQ_F_NO_SCHED;
@@ -828,7 +828,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
 	if (nctrl->max_integrity_segments)
 		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
 				 NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = nctrl->queue_count - 1;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
@@ -2206,7 +2206,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-- 
2.30.2




* [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (4 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:45   ` Chaitanya Kulkarni
  2022-09-21  9:29   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size Christoph Hellwig
                   ` (7 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Use the common helpers to allocate and free the tagsets.  To make this
work the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_rdma_ctrl.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/rdma.c | 133 ++++++++++-----------------------------
 1 file changed, 34 insertions(+), 99 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8bc2930fd496e..5ad0ab2853a49 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -788,64 +788,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	return ret;
 }
 
-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
 {
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
+	unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	set->flags = BLK_MQ_F_NO_SCHED;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.admin_tagset = set;
-	return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
+	if (ctrl->max_integrity_segments)
+		cmd_size += sizeof(struct nvme_rdma_sgl) +
+			    NVME_RDMA_METADATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	if (nctrl->max_integrity_segments)
-		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
-				 NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.tagset = set;
-	return ret;
+	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
 }
 
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
-	}
 	if (ctrl->async_event_sqe.data) {
 		cancel_work_sync(&ctrl->ctrl.async_event_work);
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -887,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		goto out_free_queue;
 
 	if (new) {
-		error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+				BLK_MQ_F_NO_SCHED,
+				sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE);
 		if (error)
 			goto out_free_async_qe;
 
-		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-			error = PTR_ERR(ctrl->ctrl.fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.admin_q)) {
-			error = PTR_ERR(ctrl->ctrl.admin_q);
-			goto out_cleanup_fabrics_q;
-		}
 	}
 
 	error = nvme_rdma_start_queue(ctrl, 0);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_remove_admin_tag_set;
 
 	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
@@ -933,15 +883,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_stop_queue:
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
+out_remove_admin_tag_set:
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_async_qe:
 	if (ctrl->async_event_sqe.data) {
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -953,16 +897,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	return error;
 }
 
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
-{
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
-	}
-	nvme_rdma_free_io_queues(ctrl);
-}
-
 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret, nr_queues;
@@ -975,10 +909,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 		ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
 		if (ret)
 			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-		if (ret)
-			goto out_free_tag_set;
 	}
 
 	/*
@@ -989,7 +919,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
 	ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
 	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;
 
 	if (!new) {
 		nvme_start_queues(&ctrl->ctrl);
@@ -1022,13 +952,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nvme_stop_queues(&ctrl->ctrl);
 	nvme_sync_io_queues(&ctrl->ctrl);
 	nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
 	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 out_free_io_queues:
 	nvme_rdma_free_io_queues(ctrl);
 	return ret;
@@ -1041,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	if (remove)
+	if (remove) {
 		nvme_start_admin_queue(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, remove);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	}
+	nvme_rdma_destroy_admin_queue(ctrl);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1055,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		if (remove)
+		if (remove) {
 			nvme_start_queues(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, remove);
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		}
+		nvme_rdma_free_io_queues(ctrl);
 	}
 }
 
@@ -1179,14 +1110,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, new);
+		if (new)
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		nvme_rdma_free_io_queues(ctrl);
 	}
 destroy_admin:
 	nvme_stop_admin_queue(&ctrl->ctrl);
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, new);
+	if (new)
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	nvme_rdma_destroy_admin_queue(ctrl);
 	return ret;
 }
 
-- 
2.30.2




* [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (5 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-21  9:30   ` Sagi Grimberg
  2022-09-22 22:48   ` James Smart
  2022-09-20 17:16 ` [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
                   ` (6 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Also update the sqsize field when capping the queue size, and remove the
check for a queue size that is larger than sqsize, given that sqsize is
only initialized from opts->queue_size.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
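For context: opts->queue_size is one-based while ctrl->ctrl.sqsize is
the zero-based NVMe submission queue size, so any cap applied to one
has to be mirrored in the other.  A worked example of the resulting
logic, assuming a hypothetical controller that reports MAXCMD = 128
against a requested queue_size of 192:

	/* opts->queue_size == 192, ctrl->ctrl.maxcmd == 128 */
	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		opts->queue_size = ctrl->ctrl.maxcmd;		/* 128 */
		ctrl->ctrl.sqsize = opts->queue_size - 1;	/* 127 */
	}
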
 drivers/nvme/host/fc.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 42767fb754552..ee376111f5610 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3165,15 +3165,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 			"to maxcmd\n",
 			opts->queue_size, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
-	}
-
-	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-		/* warn if sqsize is lower than queue_size */
-		dev_warn(ctrl->ctrl.device,
-			"queue_size %zu > ctrl sqsize %u, reducing "
-			"to sqsize\n",
-			opts->queue_size, ctrl->ctrl.sqsize + 1);
-		opts->queue_size = ctrl->ctrl.sqsize + 1;
+		ctrl->ctrl.sqsize = opts->queue_size - 1;
 	}
 
 	ret = nvme_fc_init_aen_ops(ctrl);
-- 
2.30.2




* [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (6 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-21  9:32   ` Sagi Grimberg
  2022-09-22 22:51   ` James Smart
  2022-09-20 17:16 ` [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers Christoph Hellwig
                   ` (5 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Point the private data to the generic controller structure in preparation
for using the common tagset init/exit code, and use the chance to clean
up the init_hctx methods a bit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/fc.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index ee376111f5610..d707cf93f1f4b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(set->driver_data, op);
+	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
 }
 
 static int
@@ -2135,7 +2135,7 @@ static int
 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	}
 }
 
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
-		unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
 {
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
 
 	hctx->driver_data = queue;
 	queue->hctx = hctx;
+	return 0;
 }
 
 static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-		unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
 }
 
 static int
 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
 }
 
 static void
@@ -2862,7 +2854,7 @@ nvme_fc_complete_rq(struct request *rq)
 
 static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	int i;
 
 	for (i = 0; i < set->nr_maps; i++) {
@@ -2923,7 +2915,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	ctrl->tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
 			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.driver_data = &ctrl->ctrl;
 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
@@ -3546,7 +3538,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->admin_tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
 			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = ctrl;
+	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
 	ctrl->admin_tag_set.nr_hw_queues = 1;
 	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
 	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-- 
2.30.2




* [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (7 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-21  9:33   ` Sagi Grimberg
  2022-09-22 22:56   ` James Smart
  2022-09-20 17:16 ` [PATCH 10/13] nvme-loop: initialize sqsize later Christoph Hellwig
                   ` (4 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Use the common helpers to allocate and free the tagsets.  To make this
work the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_fc_ctrl.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/fc.c | 83 +++++++++---------------------------------
 1 file changed, 17 insertions(+), 66 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d707cf93f1f4b..5d57a042dbcad 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2383,10 +2383,8 @@ nvme_fc_ctrl_free(struct kref *ref)
 		container_of(ref, struct nvme_fc_ctrl, ref);
 	unsigned long flags;
 
-	if (ctrl->ctrl.tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (ctrl->ctrl.tagset)
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 
 	/* remove from rport list */
 	spin_lock_irqsave(&ctrl->rport->lock, flags);
@@ -2394,9 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
 	nvme_start_admin_queue(&ctrl->ctrl);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 
 	kfree(ctrl->queues);
 
@@ -2906,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 	nvme_fc_init_io_queues(ctrl);
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_fc_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = &ctrl->ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		return ret;
 
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-	if (ret)
-		goto out_free_tag_set;
-
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
-		goto out_cleanup_blk_queue;
+		goto out_cleanup_tagset;
 
 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
@@ -2943,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 	nvme_fc_free_io_queues(ctrl);
 
 	/* force put free routine to ignore io queues */
@@ -3530,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	nvme_fc_init_queue(ctrl, 0);
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		goto out_free_queues;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_admin_tag_set;
-	}
-
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		ret = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 
 	/*
 	 * Would have been nice to init io queues tag set as well.
@@ -3569,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
 	if (ret)
-		goto out_cleanup_admin_q;
+		goto out_cleanup_tagset;
 
 	/* at this point, teardown path changes to ref counting on nvme ctrl */
 
@@ -3624,12 +3579,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	return ERR_PTR(-EIO);
 
-out_cleanup_admin_q:
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
 out_free_ida:
-- 
2.30.2




* [PATCH 10/13] nvme-loop: initialize sqsize later
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (8 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:45   ` Chaitanya Kulkarni
  2022-09-21  9:33   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
                   ` (3 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Defer initializing the sqsize field from the options until the queue
size has been capped by MAXCMD.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/target/loop.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9750a7fca2688..ed6d36eb7d295 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -601,7 +601,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
 	ret = -ENOMEM;
 
-	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
 
@@ -621,6 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 			opts->queue_size, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 
 	if (opts->nr_io_queues) {
 		ret = nvme_loop_create_io_queues(ctrl);
-- 
2.30.2




* [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (9 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 10/13] nvme-loop: initialize sqsize later Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers Christoph Hellwig
                   ` (2 subsequent siblings)
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Point the private data to the generic controller structure in preparation
for using the common tagset init/exit code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/target/loop.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index ed6d36eb7d295..54578cc18d528 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -204,7 +204,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 		struct request *req, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_loop_ctrl *ctrl = set->driver_data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	nvme_req(req)->ctrl = &ctrl->ctrl;
@@ -218,7 +218,7 @@ static struct lock_class_key loop_hctx_fq_lock_key;
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
 	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -238,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
 	struct nvme_loop_queue *queue = &ctrl->queues[0];
 
 	BUG_ON(hctx_idx != 0);
@@ -357,7 +357,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
 		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->admin_tag_set.driver_data = ctrl;
+	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
 	ctrl->admin_tag_set.nr_hw_queues = 1;
 	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
 	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
@@ -530,7 +530,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
 		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.driver_data = &ctrl->ctrl;
 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 	ctrl->ctrl.tagset = &ctrl->tag_set;
-- 
2.30.2




* [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (10 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  2022-09-20 17:16 ` [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q Christoph Hellwig
  2022-09-27  7:27 ` consolidate tagset / misc request_queue allocation Christoph Hellwig
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Use the common helpers to allocate and free the tagsets.  To make this
work the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_loop_ctrl.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/target/loop.c | 83 +++++++++-----------------------------
 1 file changed, 19 insertions(+), 64 deletions(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 54578cc18d528..b45fe3adf015f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
 		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 	list_del(&ctrl->list);
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
-	if (nctrl->tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (nctrl->tagset)
+		nvme_remove_io_tag_set(nctrl);
 	kfree(ctrl->queues);
 	nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
 	ctrl->queues[0].ctrl = ctrl;
 	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
 	if (error)
 		return error;
 	ctrl->ctrl.queue_count = 1;
 
-	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			sizeof(struct nvme_loop_iod) +
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
 	if (error)
 		goto out_free_sq;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		error = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_tagset;
-	}
 
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		error = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 	/* reset stopped state for the fresh admin queue */
 	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
 
 	error = nvmf_connect_admin_queue(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	ctrl->ctrl.max_hw_sectors =
 		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 	error = nvme_init_ctrl_finish(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	return 0;
 
-out_cleanup_queue:
+out_cleanup_tagset:
 	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_sq:
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	if (ret)
 		return ret;
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_loop_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->tag_set.driver_data = &ctrl->ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			sizeof(struct nvme_loop_iod) +
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
 	if (ret)
 		goto out_destroy_queues;
 
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-	if (ret)
-		goto out_free_tagset;
-
 	ret = nvme_loop_connect_io_queues(ctrl);
 	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;
 
 	return 0;
 
-out_cleanup_connect_q:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 out_destroy_queues:
 	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
-- 
2.30.2




* [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (11 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 17:16 ` Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  2022-09-27  7:27 ` consolidate tagset / misc request_queue allocation Christoph Hellwig
  13 siblings, 2 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-20 17:16 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

Unused now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/nvme.h | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6dec8a3bef1aa..4a845e9f17e47 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -976,14 +976,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 }
 #endif
 
-static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
-{
-	ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
-	if (IS_ERR(ctrl->connect_q))
-		return PTR_ERR(ctrl->connect_q);
-	return 0;
-}
-
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
 	return dev_to_disk(dev)->private_data;
-- 
2.30.2




* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
@ 2022-09-20 21:42   ` Chaitanya Kulkarni
  2022-09-21  3:37   ` Chao Leng
  2022-09-21  8:25   ` Sagi Grimberg
  2 siblings, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:42 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, Sagi Grimberg, Keith Busch, James Smart

On 9/20/22 10:15, Christoph Hellwig wrote:
> Add common helpers to allocate and tear down the admin and I/O tag sets,
> including the special queues allocated with them.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Thanks for doing this, I remember sending a similar series but didn't
get a chance to pursue it to completion.

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>



* Re: [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  2022-09-20 17:16 ` [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue Christoph Hellwig
@ 2022-09-20 21:43   ` Chaitanya Kulkarni
  2022-09-21  9:24   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:43 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> ->queue_size in nvme_tcp_queue is not used anywhere, so remove it.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
>


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 21:43   ` Chaitanya Kulkarni
  2022-09-21  9:25   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:43 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Point the private data to the generic controller structure in preparation
> of using the common tagset init/exit code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>


^ permalink raw reply	[flat|nested] 49+ messages in thread
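
The shape of this driver_data change, sketched for nvme-tcp (a condensed
illustration, not the literal hunk): set->driver_data now carries the
generic nvme_ctrl, so callbacks recover the private structure through the
driver's existing to_tcp_ctrl() container_of wrapper instead of using the
pointer directly:

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	/* data is set->driver_data, i.e. the generic nvme_ctrl now */
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);

	/* queue 0 is the admin queue, so I/O hctx i maps to queue i + 1 */
	hctx->driver_data = &ctrl->queues[hctx_idx + 1];
	return 0;
}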

* Re: [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 21:44   ` Chaitanya Kulkarni
  2022-09-21  9:26   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:44 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Use the common helpers to allocate and free the tagsets.  To make this
> work the generic nvme_ctrl now needs to be stored in the hctx private
> data instead of the nvme_tcp_ctrl.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>



^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 21:44   ` Chaitanya Kulkarni
  2022-09-21  9:26   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:44 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Point the private data to the generic controller structure in preparation
> of using the common tagset init/exit code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 21:45   ` Chaitanya Kulkarni
  2022-09-21  9:29   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Use the common helpers to allocate and free the tagsets.  To make this
> work the generic nvme_ctrl now needs to be stored in the hctx private
> data instead of the nvme_rdma_ctrl.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>



^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 10/13] nvme-loop: initialize sqsize later
  2022-09-20 17:16 ` [PATCH 10/13] nvme-loop: initialize sqsize later Christoph Hellwig
@ 2022-09-20 21:45   ` Chaitanya Kulkarni
  2022-09-21  9:33   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Defer initializing the sqsize field from the options until it has been
> capped by MAXCMD.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>


^ permalink raw reply	[flat|nested] 49+ messages in thread
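
A sketch of the resulting initialization order (condensed from the
nvme-loop setup path; see the patch for the exact surrounding code): the
requested queue size is capped against the controller's MAXCMD first, and
only then is sqsize derived from it:

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if queue_size exceeds what the controller allows */
		dev_warn(ctrl->ctrl.device,
			"queue_size %u > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}
	/* sqsize is a zero's based value */
	ctrl->ctrl.sqsize = opts->queue_size - 1;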

* Re: [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Keith Busch, Sagi Grimberg

On 9/20/22 10:16, Christoph Hellwig wrote:
> Point the private data to the generic controller structure in preparation
> of using the common tagset init/exit code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Use the common helpers to allocate and free the tagsets.  To make this
> work the generic nvme_ctrl now needs to be stored in the hctx private
> data instead of the nvme_loop_ctrl.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>



^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q
  2022-09-20 17:16 ` [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q Christoph Hellwig
@ 2022-09-20 21:46   ` Chaitanya Kulkarni
  2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Chaitanya Kulkarni @ 2022-09-20 21:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, James Smart, Sagi Grimberg, Keith Busch

On 9/20/22 10:16, Christoph Hellwig wrote:
> Unused now.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>



^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
  2022-09-20 21:42   ` Chaitanya Kulkarni
@ 2022-09-21  3:37   ` Chao Leng
  2022-09-22  5:45     ` Christoph Hellwig
  2022-09-21  8:25   ` Sagi Grimberg
  2 siblings, 1 reply; 49+ messages in thread
From: Chao Leng @ 2022-09-21  3:37 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme



On 2022/9/21 1:15, Christoph Hellwig wrote:
> Add common helpers to allocate and tear down the admin and I/O tag sets,
> including the special queues allocated with them.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/core.c | 100 +++++++++++++++++++++++++++++++++++++++
>   drivers/nvme/host/nvme.h |   8 ++++
>   2 files changed, 108 insertions(+)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 8c9c1176624da..f8d9f32adc87c 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -4800,6 +4800,106 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
>   }
>   EXPORT_SYMBOL_GPL(nvme_complete_async_event);
>   
> +int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size)
> +{
> +	int ret;
> +
> +	memset(set, 0, sizeof(*set));
> +	set->ops = ops;
> +	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
> +	if (ctrl->ops->flags & NVME_F_FABRICS)
> +		set->reserved_tags = NVMF_RESERVED_TAGS;
> +	set->numa_node = ctrl->numa_node;
> +	set->flags = flags;
> +	set->cmd_size = cmd_size;
> +	set->driver_data = ctrl;
> +	set->nr_hw_queues = 1;
> +	set->timeout = NVME_ADMIN_TIMEOUT;
> +	ret = blk_mq_alloc_tag_set(set);
> +	if (ret)
> +		return ret;
> +
> +	ctrl->admin_q = blk_mq_init_queue(set);
> +	if (IS_ERR(ctrl->admin_q)) {
> +		ret = PTR_ERR(ctrl->admin_q);
> +		goto out_free_tagset;
> +	}
> +
> +	if (ctrl->ops->flags & NVME_F_FABRICS) {
> +		ctrl->fabrics_q = blk_mq_init_queue(set);
> +		if (IS_ERR(ctrl->fabrics_q)) {
> +			ret = PTR_ERR(ctrl->fabrics_q);
> +			goto out_cleanup_admin_q;
> +		}
> +	}
> +
> +	ctrl->admin_tagset = set;
> +	return 0;
> +
> +out_cleanup_admin_q:
> +	blk_mq_destroy_queue(ctrl->fabrics_q);
> +out_free_tagset:
> +	blk_mq_free_tag_set(ctrl->admin_tagset);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
> +
> +void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
> +{
> +	blk_mq_destroy_queue(ctrl->admin_q);
> +	if (ctrl->ops->flags & NVME_F_FABRICS)
> +		blk_mq_destroy_queue(ctrl->fabrics_q);
> +	blk_mq_free_tag_set(ctrl->admin_tagset);
> +}
> +EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
> +
> +int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size)
> +{
> +	int ret;
> +
> +	memset(set, 0, sizeof(*set));
> +	set->ops = ops;
> +	set->queue_depth = ctrl->sqsize + 1;
> +	set->reserved_tags = NVMF_RESERVED_TAGS;
> +	set->numa_node = ctrl->numa_node;
> +	set->flags = flags;
> +	set->cmd_size = cmd_size,
> +	set->driver_data = ctrl;
> +	set->nr_hw_queues = ctrl->queue_count - 1;
> +	set->timeout = NVME_IO_TIMEOUT;
> +	if (ops->map_queues)
> +		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
> +	ret = blk_mq_alloc_tag_set(set);
> +	if (ret)
> +		return ret;
> +
> +	ctrl->connect_q = blk_mq_init_queue(set);
> +        if (IS_ERR(ctrl->connect_q)) {
> +		ret = PTR_ERR(ctrl->connect_q);
> +		goto out_free_tag_set;
> +	}
Maybe we should move the connect_q related code to nvme_alloc_admin_tag_set.
That way we would not need to set NVMF_RESERVED_TAGS on the I/O tagset,
which saves resources when there is a large number of I/O queues, and it
would also simplify any tagset-based implementation.
Logically, connect_q is a management resource; however, it uses the I/O
tagset, which makes it awkward to handle some logic based on the tagset.
> +
> +	ctrl->tagset = set;
> +	return 0;
> +
> +out_free_tag_set:
> +	blk_mq_free_tag_set(set);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
> +
> +void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
> +{
> +	if (ctrl->ops->flags & NVME_F_FABRICS)
> +		blk_mq_destroy_queue(ctrl->connect_q);
> +	blk_mq_free_tag_set(ctrl->tagset);
> +}
> +EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
> +
>   void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
>   {
>   	nvme_mpath_stop(ctrl);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index 1bdf714dcd9e4..6dec8a3bef1aa 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -722,6 +722,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
>   void nvme_start_ctrl(struct nvme_ctrl *ctrl);
>   void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
>   int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
> +int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size);
> +void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
> +int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size);
> +void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
>   
>   void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
>   
> 


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
  2022-09-20 21:42   ` Chaitanya Kulkarni
  2022-09-21  3:37   ` Chao Leng
@ 2022-09-21  8:25   ` Sagi Grimberg
  2022-09-22  5:48     ` Christoph Hellwig
  2 siblings, 1 reply; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  8:25 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme



On 9/20/22 20:15, Christoph Hellwig wrote:
> Add common helpers to allocate and tear down the admin and I/O tag sets,
> including the special queues allocated with them.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/core.c | 100 +++++++++++++++++++++++++++++++++++++++
>   drivers/nvme/host/nvme.h |   8 ++++
>   2 files changed, 108 insertions(+)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 8c9c1176624da..f8d9f32adc87c 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -4800,6 +4800,106 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
>   }
>   EXPORT_SYMBOL_GPL(nvme_complete_async_event);
>   
> +int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size)
> +{
> +	int ret;
> +
> +	memset(set, 0, sizeof(*set));
> +	set->ops = ops;
> +	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
> +	if (ctrl->ops->flags & NVME_F_FABRICS)
> +		set->reserved_tags = NVMF_RESERVED_TAGS;
> +	set->numa_node = ctrl->numa_node;
> +	set->flags = flags;
> +	set->cmd_size = cmd_size;
> +	set->driver_data = ctrl;
> +	set->nr_hw_queues = 1;
> +	set->timeout = NVME_ADMIN_TIMEOUT;
> +	ret = blk_mq_alloc_tag_set(set);
> +	if (ret)
> +		return ret;
> +
> +	ctrl->admin_q = blk_mq_init_queue(set);
> +	if (IS_ERR(ctrl->admin_q)) {
> +		ret = PTR_ERR(ctrl->admin_q);
> +		goto out_free_tagset;
> +	}
> +
> +	if (ctrl->ops->flags & NVME_F_FABRICS) {
> +		ctrl->fabrics_q = blk_mq_init_queue(set);
> +		if (IS_ERR(ctrl->fabrics_q)) {
> +			ret = PTR_ERR(ctrl->fabrics_q);
> +			goto out_cleanup_admin_q;
> +		}
> +	}
> +
> +	ctrl->admin_tagset = set;
> +	return 0;
> +
> +out_cleanup_admin_q:
> +	blk_mq_destroy_queue(ctrl->fabrics_q);
> +out_free_tagset:
> +	blk_mq_free_tag_set(ctrl->admin_tagset);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
> +
> +void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
> +{
> +	blk_mq_destroy_queue(ctrl->admin_q);
> +	if (ctrl->ops->flags & NVME_F_FABRICS)
> +		blk_mq_destroy_queue(ctrl->fabrics_q);
> +	blk_mq_free_tag_set(ctrl->admin_tagset);
> +}
> +EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
> +
> +int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
> +		const struct blk_mq_ops *ops, unsigned int flags,
> +		unsigned int cmd_size)
> +{
> +	int ret;
> +
> +	memset(set, 0, sizeof(*set));
> +	set->ops = ops;
> +	set->queue_depth = ctrl->sqsize + 1;
> +	set->reserved_tags = NVMF_RESERVED_TAGS;
> +	set->numa_node = ctrl->numa_node;
> +	set->flags = flags;
> +	set->cmd_size = cmd_size,
> +	set->driver_data = ctrl;
> +	set->nr_hw_queues = ctrl->queue_count - 1;
> +	set->timeout = NVME_IO_TIMEOUT;
> +	if (ops->map_queues)
> +		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
> +	ret = blk_mq_alloc_tag_set(set);
> +	if (ret)
> +		return ret;
> +

if (ctrl->ops->flags & NVME_F_FABRICS) {

> +	ctrl->connect_q = blk_mq_init_queue(set);
> +        if (IS_ERR(ctrl->connect_q)) {
> +		ret = PTR_ERR(ctrl->connect_q);
> +		goto out_free_tag_set;
> +	}

}

> +
> +	ctrl->tagset = set;
> +	return 0;
> +
> +out_free_tag_set:
> +	blk_mq_free_tag_set(set);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  2022-09-20 17:16 ` [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue Christoph Hellwig
  2022-09-20 21:43   ` Chaitanya Kulkarni
@ 2022-09-21  9:24   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:24 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
  2022-09-20 21:43   ` Chaitanya Kulkarni
@ 2022-09-21  9:25   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:25 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers Christoph Hellwig
  2022-09-20 21:44   ` Chaitanya Kulkarni
@ 2022-09-21  9:26   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:26 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
  2022-09-20 21:44   ` Chaitanya Kulkarni
@ 2022-09-21  9:26   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:26 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers Christoph Hellwig
  2022-09-20 21:45   ` Chaitanya Kulkarni
@ 2022-09-21  9:29   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:29 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  2022-09-20 17:16 ` [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size Christoph Hellwig
@ 2022-09-21  9:30   ` Sagi Grimberg
  2022-09-22 22:48   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:30 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
@ 2022-09-21  9:32   ` Sagi Grimberg
  2022-09-22 22:51   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:32 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers Christoph Hellwig
@ 2022-09-21  9:33   ` Sagi Grimberg
  2022-09-22 22:56   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:33 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 10/13] nvme-loop: initialize sqsize later
  2022-09-20 17:16 ` [PATCH 10/13] nvme-loop: initialize sqsize later Christoph Hellwig
  2022-09-20 21:45   ` Chaitanya Kulkarni
@ 2022-09-21  9:33   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:33 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
@ 2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:34 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
@ 2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:34 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q
  2022-09-20 17:16 ` [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q Christoph Hellwig
  2022-09-20 21:46   ` Chaitanya Kulkarni
@ 2022-09-21  9:34   ` Sagi Grimberg
  1 sibling, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-21  9:34 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-21  3:37   ` Chao Leng
@ 2022-09-22  5:45     ` Christoph Hellwig
  2022-09-22  8:02       ` Chao Leng
  0 siblings, 1 reply; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-22  5:45 UTC (permalink / raw)
  To: Chao Leng
  Cc: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart, linux-nvme

On Wed, Sep 21, 2022 at 11:37:08AM +0800, Chao Leng wrote:
>> +	ctrl->connect_q = blk_mq_init_queue(set);
>> +        if (IS_ERR(ctrl->connect_q)) {
>> +		ret = PTR_ERR(ctrl->connect_q);
>> +		goto out_free_tag_set;
>> +	}
> Maybe we should move the connect_q related code to nvme_alloc_admin_tag_set.
> Thus we don't need to set NVMF_RESERVED_TAGS for tagset,
> if there are large amount of ns queues, it will save resources.
> At the same time, it will simplify tagset-based implementation.
> According to the behavior logic, connect_q is a management resource.
> However, connect_q use the I/O tagset.
> As a result, it is difficult to process some logic based on tagset.

The fabrics connect command needs to be sent on each of the I/O
queues in addition to the admin queue, which means the admin tag_set
can't be used.



^ permalink raw reply	[flat|nested] 49+ messages in thread
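
To make that constraint concrete: the connect request for I/O queue qid
must be allocated on exactly that queue's hardware context, which
blk_mq_alloc_request_hctx() only allows on a request_queue backed by the
same tag set.  A minimal sketch (hypothetical helper name, not the actual
fabrics code, which lives in the connect path of drivers/nvme/host/fabrics.c):

/*
 * Hedged sketch: I/O queues start at qid 1, so the hctx index for
 * queue 'qid' is qid - 1.  connect_q shares the I/O tag set, which
 * is why the admin tag set cannot be used here.
 */
static int sketch_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct request *rq;

	rq = blk_mq_alloc_request_hctx(ctrl->connect_q, REQ_OP_DRV_OUT,
				       BLK_MQ_REQ_RESERVED, qid - 1);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* build and submit the fabrics connect command here ... */

	blk_mq_free_request(rq);
	return 0;
}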

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-21  8:25   ` Sagi Grimberg
@ 2022-09-22  5:48     ` Christoph Hellwig
  2022-09-22  8:09       ` Chao Leng
  0 siblings, 1 reply; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-22  5:48 UTC (permalink / raw)
  To: Sagi Grimberg; +Cc: Christoph Hellwig, Keith Busch, James Smart, linux-nvme

On Wed, Sep 21, 2022 at 11:25:11AM +0300, Sagi Grimberg wrote:
>> +	if (ops->map_queues)
>> +		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
>> +	ret = blk_mq_alloc_tag_set(set);
>> +	if (ret)
>> +		return ret;
>> +
>
> if (ctrl->ops->flags & NVME_F_FABRICS) {

Incremental fixup:

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f8d9f32adc87c..91c2cb59c4eb6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4877,10 +4877,12 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	if (ret)
 		return ret;
 
-	ctrl->connect_q = blk_mq_init_queue(set);
-        if (IS_ERR(ctrl->connect_q)) {
-		ret = PTR_ERR(ctrl->connect_q);
-		goto out_free_tag_set;
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->connect_q = blk_mq_init_queue(set);
+        	if (IS_ERR(ctrl->connect_q)) {
+			ret = PTR_ERR(ctrl->connect_q);
+			goto out_free_tag_set;
+		}
 	}
 
 	ctrl->tagset = set;


^ permalink raw reply related	[flat|nested] 49+ messages in thread
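
For symmetry, note that the teardown side as posted in patch 1 already
guards connect_q with the same fabrics check, so only the allocation path
needed this fixup:

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{
	/* connect_q is only allocated for fabrics controllers */
	if (ctrl->ops->flags & NVME_F_FABRICS)
		blk_mq_destroy_queue(ctrl->connect_q);
	blk_mq_free_tag_set(ctrl->tagset);
}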

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-22  5:45     ` Christoph Hellwig
@ 2022-09-22  8:02       ` Chao Leng
  2022-09-22 14:18         ` Christoph Hellwig
  0 siblings, 1 reply; 49+ messages in thread
From: Chao Leng @ 2022-09-22  8:02 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Keith Busch, Sagi Grimberg, James Smart, linux-nvme



On 2022/9/22 13:45, Christoph Hellwig wrote:
> On Wed, Sep 21, 2022 at 11:37:08AM +0800, Chao Leng wrote:
>>> +	ctrl->connect_q = blk_mq_init_queue(set);
>>> +        if (IS_ERR(ctrl->connect_q)) {
>>> +		ret = PTR_ERR(ctrl->connect_q);
>>> +		goto out_free_tag_set;
>>> +	}
>> Maybe we should move the connect_q related code to nvme_alloc_admin_tag_set.
>> Thus we don't need to set NVMF_RESERVED_TAGS for tagset,
>> if there are large amount of ns queues, it will save resources.
>> At the same time, it will simplify tagset-based implementation.
>> According to the behavior logic, connect_q is a management resource.
>> However, connect_q use the I/O tagset.
>> As a result, it is difficult to process some logic based on tagset.
> 
> The fabrics connect command needs to be sent on each of the I/O
> queues in addition to the admin queue, which means the admin tag_set
> can't be used.
We could do some special processing in nvme_setup_cmd for the connect
command to handle the queue mapping.
Do you think that would be okay?
> 
> 
> .
> 


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-22  5:48     ` Christoph Hellwig
@ 2022-09-22  8:09       ` Chao Leng
  2022-09-22 14:19         ` Christoph Hellwig
  0 siblings, 1 reply; 49+ messages in thread
From: Chao Leng @ 2022-09-22  8:09 UTC (permalink / raw)
  To: Christoph Hellwig, Sagi Grimberg; +Cc: Keith Busch, James Smart, linux-nvme



On 2022/9/22 13:48, Christoph Hellwig wrote:
> On Wed, Sep 21, 2022 at 11:25:11AM +0300, Sagi Grimberg wrote:
>>> +	if (ops->map_queues)
>>> +		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
>>> +	ret = blk_mq_alloc_tag_set(set);
>>> +	if (ret)
>>> +		return ret;
>>> +
>>
>> if (ctrl->ops->flags & NVME_F_FABRICS) {
> 
> Incremental fixup:
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index f8d9f32adc87c..91c2cb59c4eb6 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -4877,10 +4877,12 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
>   	if (ret)
>   		return ret;
>   
> -	ctrl->connect_q = blk_mq_init_queue(set);
> -        if (IS_ERR(ctrl->connect_q)) {
> -		ret = PTR_ERR(ctrl->connect_q);
> -		goto out_free_tag_set;
> +	if (ctrl->ops->flags & NVME_F_FABRICS) {
> +		ctrl->connect_q = blk_mq_init_queue(set);
> +        	if (IS_ERR(ctrl->connect_q)) {
> +			ret = PTR_ERR(ctrl->connect_q);
> +			goto out_free_tag_set;
> +		}
>   	}
Maybe we should not add the NVME_F_FABRICS check at all.
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->fabrics_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->fabrics_q)) {
+			ret = PTR_ERR(ctrl->fabrics_q);
+			goto out_cleanup_admin_q;
+		}
+	}
nvme_alloc_admin_tag_set should not check NVME_F_FABRICS either;
the new helpers are only used for fabrics.
Maybe adding "fabric" to the function name would make this clearer,
like this: nvme_alloc_fabric_admin_tag_set.
That would also avoid confusion with nvme_alloc_admin_tags.
>   
>   	ctrl->tagset = set;
> 
> .
> 


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-22  8:02       ` Chao Leng
@ 2022-09-22 14:18         ` Christoph Hellwig
  0 siblings, 0 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-22 14:18 UTC (permalink / raw)
  To: Chao Leng
  Cc: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart, linux-nvme

On Thu, Sep 22, 2022 at 04:02:01PM +0800, Chao Leng wrote:
> We can do some special processing in nvme_setup_cmd for connect command to
> treat queue mapping.
> Do you think that's okay?

nvme_setup_cmd does not help, as anything we could do there we could
also do in the code that submits the command.  The point is that the
connect needs to be sent out on the hctx for that particular I/O queue.


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 01/13] nvme: add common helpers to allocate and free tagsets
  2022-09-22  8:09       ` Chao Leng
@ 2022-09-22 14:19         ` Christoph Hellwig
  0 siblings, 0 replies; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-22 14:19 UTC (permalink / raw)
  To: Chao Leng
  Cc: Christoph Hellwig, Sagi Grimberg, Keith Busch, James Smart, linux-nvme

On Thu, Sep 22, 2022 at 04:09:14PM +0800, Chao Leng wrote:
> Maybe we should not add the checking of NVME_F_FABRICS.
> +	if (ctrl->ops->flags & NVME_F_FABRICS) {
> +		ctrl->fabrics_q = blk_mq_init_queue(set);
> +		if (IS_ERR(ctrl->fabrics_q)) {
> +			ret = PTR_ERR(ctrl->fabrics_q);
> +			goto out_cleanup_admin_q;
> +		}
> +	}
> nvme_alloc_admin_tag_set should not check NVME_F_FABRICS either.
> The new helpers is just used for fabrics.

Right now, yes.  As mentioned in the cover letter I plan to use them
for PCIe and nvme-apple as well, but that will first need more work,
including block layer work on request_queue refcounting.


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  2022-09-20 17:16 ` [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size Christoph Hellwig
  2022-09-21  9:30   ` Sagi Grimberg
@ 2022-09-22 22:48   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: James Smart @ 2022-09-22 22:48 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

On 9/20/2022 10:16 AM, Christoph Hellwig wrote:
> Also update the sqsize field when capping the queue size, and remove the
> check for a queue size that is larger than sqsize, given that sqsize is
> only initialized from opts->queue_size.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/fc.c | 10 +---------
>   1 file changed, 1 insertion(+), 9 deletions(-)
> 
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index 42767fb754552..ee376111f5610 100644

Reviewed-by: James Smart <jsmart2021@gmail.com>

-- james




^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data
  2022-09-20 17:16 ` [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
  2022-09-21  9:32   ` Sagi Grimberg
@ 2022-09-22 22:51   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: James Smart @ 2022-09-22 22:51 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

On 9/20/2022 10:16 AM, Christoph Hellwig wrote:
> Point the private data to the generic controller structure in preparation
> of using the common tagset init/exit code and use the chance the cleanup
> the init_hctx methods a bit.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/fc.c | 32 ++++++++++++--------------------
>   1 file changed, 12 insertions(+), 20 deletions(-)
> 

Reviewed-by: James Smart <jsmart2021@gmail.com>

-- james





^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers
  2022-09-20 17:16 ` [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers Christoph Hellwig
  2022-09-21  9:33   ` Sagi Grimberg
@ 2022-09-22 22:56   ` James Smart
  1 sibling, 0 replies; 49+ messages in thread
From: James Smart @ 2022-09-22 22:56 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

On 9/20/2022 10:16 AM, Christoph Hellwig wrote:
> Use the common helpers to allocate and free the tagsets.  To make this
> work the generic nvme_ctrl now needs to be stored in the hctx private
> data instead of the nvme_fc_ctrl.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/fc.c | 83 +++++++++---------------------------------
>   1 file changed, 17 insertions(+), 66 deletions(-)
> 

Reviewed-by: James Smart <jsmart2021@gmail.com>

-- james



^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: consolidate tagset / misc request_queue allocation
  2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
                   ` (12 preceding siblings ...)
  2022-09-20 17:16 ` [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q Christoph Hellwig
@ 2022-09-27  7:27 ` Christoph Hellwig
  2022-09-28  6:47   ` Sagi Grimberg
  13 siblings, 1 reply; 49+ messages in thread
From: Christoph Hellwig @ 2022-09-27  7:27 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg, James Smart; +Cc: linux-nvme

I've pulled this series into nvme-6.1 with the additional F_FABRICS
check pointed out by Sagi.


^ permalink raw reply	[flat|nested] 49+ messages in thread

* Re: consolidate tagset / misc request_queue allocation
  2022-09-27  7:27 ` consolidate tagset / misc request_queue allocation Christoph Hellwig
@ 2022-09-28  6:47   ` Sagi Grimberg
  0 siblings, 0 replies; 49+ messages in thread
From: Sagi Grimberg @ 2022-09-28  6:47 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, James Smart; +Cc: linux-nvme


> I've pulled this series into nvme-6.1 with the additional F_FABRICS
> check pointed out by Sagi.

You can add my:
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>


^ permalink raw reply	[flat|nested] 49+ messages in thread

end of thread, newest message: 2022-09-28  6:47 UTC

Thread overview: 49+ messages
2022-09-20 17:15 consolidate tagset / misc request_queue allocation Christoph Hellwig
2022-09-20 17:15 ` [PATCH 01/13] nvme: add common helpers to allocate and free tagsets Christoph Hellwig
2022-09-20 21:42   ` Chaitanya Kulkarni
2022-09-21  3:37   ` Chao Leng
2022-09-22  5:45     ` Christoph Hellwig
2022-09-22  8:02       ` Chao Leng
2022-09-22 14:18         ` Christoph Hellwig
2022-09-21  8:25   ` Sagi Grimberg
2022-09-22  5:48     ` Christoph Hellwig
2022-09-22  8:09       ` Chao Leng
2022-09-22 14:19         ` Christoph Hellwig
2022-09-20 17:16 ` [PATCH 02/13] nvme-tcp: remove the unused queue_size member in nvme_tcp_queue Christoph Hellwig
2022-09-20 21:43   ` Chaitanya Kulkarni
2022-09-21  9:24   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 03/13] nvme-tcp: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
2022-09-20 21:43   ` Chaitanya Kulkarni
2022-09-21  9:25   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 04/13] nvme-tcp: use the tagset alloc/free helpers Christoph Hellwig
2022-09-20 21:44   ` Chaitanya Kulkarni
2022-09-21  9:26   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 05/13] nvme-rdma: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
2022-09-20 21:44   ` Chaitanya Kulkarni
2022-09-21  9:26   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 06/13] nvme-rdma: use the tagset alloc/free helpers Christoph Hellwig
2022-09-20 21:45   ` Chaitanya Kulkarni
2022-09-21  9:29   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 07/13] nvme-fc: keep ctrl->sqsize in sync with opts->queue_size Christoph Hellwig
2022-09-21  9:30   ` Sagi Grimberg
2022-09-22 22:48   ` James Smart
2022-09-20 17:16 ` [PATCH 08/13] nvme-fc: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
2022-09-21  9:32   ` Sagi Grimberg
2022-09-22 22:51   ` James Smart
2022-09-20 17:16 ` [PATCH 09/13] nvme-fc: use the tagset alloc/free helpers Christoph Hellwig
2022-09-21  9:33   ` Sagi Grimberg
2022-09-22 22:56   ` James Smart
2022-09-20 17:16 ` [PATCH 10/13] nvme-loop: initialize sqsize later Christoph Hellwig
2022-09-20 21:45   ` Chaitanya Kulkarni
2022-09-21  9:33   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 11/13] nvme-loop: store the generic nvme_ctrl in set->driver_data Christoph Hellwig
2022-09-20 21:46   ` Chaitanya Kulkarni
2022-09-21  9:34   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 12/13] nvme-loop: use the tagset alloc/free helpers Christoph Hellwig
2022-09-20 21:46   ` Chaitanya Kulkarni
2022-09-21  9:34   ` Sagi Grimberg
2022-09-20 17:16 ` [PATCH 13/13] nvme: remove nvme_ctrl_init_connect_q Christoph Hellwig
2022-09-20 21:46   ` Chaitanya Kulkarni
2022-09-21  9:34   ` Sagi Grimberg
2022-09-27  7:27 ` consolidate tagset / misc request_queue allocation Christoph Hellwig
2022-09-28  6:47   ` Sagi Grimberg
