* [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID
@ 2019-04-10  8:00 Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue Yufen Yu
                   ` (7 more replies)
  0 siblings, 8 replies; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Hi, all

This patchset introduces a new flag, NVME_ADMIN_QID, for the admin
queue. Currently, all drivers on the host and target side use index
'0' for the admin queue. Using NVME_ADMIN_QID instead of a bare '0'
makes the code easier to read and understand.
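
For example, with the new define in place, a typical reference to the
admin queue in the nvme-pci driver (patch 2/6) changes from:

	struct nvme_queue *nvmeq = &dev->queues[0];

to:

	struct nvme_queue *nvmeq = &dev->queues[NVME_ADMIN_QID];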

This patchset is an RFC, and it only applies NVME_ADMIN_QID to the
drivers on the nvme host side. I am not sure whether this change
actually makes sense. Any suggestions are welcome, and thanks in
advance.

Yufen Yu (6):
  nvme: add new flag NVME_ADMIN_QID for nvme admin queue
  nvme-pci: use NVME_ADMIN_QID for admin queue
  nvme-rdma: use flag NVME_ADMIN_QID for admin queue
  nvme-tcp: use flag NVME_ADMIN_QID for admin queue
  nvme-fc: use flag NVME_ADMIN_QID for admin queue
  nvme-fabrics: use flag NVME_ADMIN_QID

 drivers/nvme/host/fabrics.c |  2 +-
 drivers/nvme/host/fc.c      | 16 ++++++++--------
 drivers/nvme/host/pci.c     | 18 +++++++++---------
 drivers/nvme/host/rdma.c    | 22 +++++++++++-----------
 drivers/nvme/host/tcp.c     | 24 ++++++++++++------------
 include/linux/nvme.h        |  3 +++
 6 files changed, 44 insertions(+), 41 deletions(-)

-- 
2.16.2.dirty

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10 16:54   ` Heitke, Kenneth
  2019-04-10  8:00 ` [PATCH RFC 2/6] nvme-pci: use NVME_ADMIN_QID for " Yufen Yu
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Signed-off-by: Yufen Yu <yuyufen at huawei.com>
---
 include/linux/nvme.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index baa49e6a23cc..9f09c0b764ca 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -24,6 +24,9 @@
 
 #define NVME_RDMA_IP_PORT	4420
 
+/* nvme admin queue qid */
+#define NVME_ADMIN_QID 	0
+
 #define NVME_NSID_ALL		0xffffffff
 
 enum nvme_subsys_type {
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 2/6] nvme-pci: use NVME_ADMIN_QID for admin queue
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 3/6] nvme-rdma: use flag " Yufen Yu
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Signed-off-by: Yufen Yu <yuyufen at huawei.com>
---
 drivers/nvme/host/pci.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a84a31525097..ecbe7b9cba19 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -424,7 +424,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = &dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[NVME_ADMIN_QID];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -1122,7 +1122,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = &dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[NVME_ADMIN_QID];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1421,13 +1421,13 @@ static void nvme_suspend_io_queues(struct nvme_dev *dev)
 {
 	int i;
 
-	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+	for (i = dev->ctrl.queue_count - 1; i > NVME_ADMIN_QID; i--)
 		nvme_suspend_queue(&dev->queues[i]);
 }
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = &dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[NVME_ADMIN_QID];
 
 	if (shutdown)
 		nvme_shutdown_ctrl(&dev->ctrl);
@@ -1710,11 +1710,11 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
+	result = nvme_alloc_queue(dev, NVME_ADMIN_QID, NVME_AQ_DEPTH);
 	if (result)
 		return result;
 
-	nvmeq = &dev->queues[0];
+	nvmeq = &dev->queues[NVME_ADMIN_QID];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
 
@@ -1727,7 +1727,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 		return result;
 
 	nvmeq->cq_vector = 0;
-	nvme_init_queue(nvmeq, 0);
+	nvme_init_queue(nvmeq, NVME_ADMIN_QID);
 	result = queue_request_irq(nvmeq);
 	if (result) {
 		nvmeq->cq_vector = -1;
@@ -2112,7 +2112,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = &dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[NVME_ADMIN_QID];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -2428,7 +2428,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		nvme_disable_admin_queue(dev, shutdown);
 	}
 	nvme_suspend_io_queues(dev);
-	nvme_suspend_queue(&dev->queues[0]);
+	nvme_suspend_queue(&dev->queues[NVME_ADMIN_QID]);
 	nvme_pci_disable(dev);
 
 	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 3/6] nvme-rdma: use flag NVME_ADMIN_QID for admin queue
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 2/6] nvme-pci: use NVME_ADMIN_QID for " Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 4/6] nvme-tcp: " Yufen Yu
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


---
 drivers/nvme/host/rdma.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 11a5ecae78c8..c1249e7a1790 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -323,9 +323,9 @@ static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
 	struct nvme_rdma_ctrl *ctrl = data;
-	struct nvme_rdma_queue *queue = &ctrl->queues[0];
+	struct nvme_rdma_queue *queue = &ctrl->queues[NVME_ADMIN_QID];
 
-	BUG_ON(hctx_idx != 0);
+	BUG_ON(hctx_idx != NVME_ADMIN_QID);
 
 	hctx->driver_data = queue;
 	return 0;
@@ -776,7 +776,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 				sizeof(struct nvme_command), DMA_TO_DEVICE);
 		ctrl->async_event_sqe.data = NULL;
 	}
-	nvme_rdma_free_queue(&ctrl->queues[0]);
+	nvme_rdma_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
 }
 
 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -784,11 +784,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	int error;
 
-	error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+	error = nvme_rdma_alloc_queue(ctrl, NVME_ADMIN_QID, NVME_AQ_DEPTH);
 	if (error)
 		return error;
 
-	ctrl->device = ctrl->queues[0].device;
+	ctrl->device = ctrl->queues[NVME_ADMIN_QID].device;
 	ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
 
 	ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
@@ -812,7 +812,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		}
 	}
 
-	error = nvme_rdma_start_queue(ctrl, 0);
+	error = nvme_rdma_start_queue(ctrl, NVME_ADMIN_QID);
 	if (error)
 		goto out_cleanup_queue;
 
@@ -841,7 +841,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	return 0;
 
 out_stop_queue:
-	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_rdma_stop_queue(&ctrl->queues[NVME_ADMIN_QID]);
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -853,7 +853,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		sizeof(struct nvme_command), DMA_TO_DEVICE);
 	ctrl->async_event_sqe.data = NULL;
 out_free_queue:
-	nvme_rdma_free_queue(&ctrl->queues[0]);
+	nvme_rdma_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
 	return error;
 }
 
@@ -913,7 +913,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
-	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_rdma_stop_queue(&ctrl->queues[NVME_ADMIN_QID]);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
 			&ctrl->ctrl);
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1027,7 +1027,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 	if (ctrl->ctrl.queue_count > 1)
 		nvme_rdma_destroy_io_queues(ctrl, new);
 destroy_admin:
-	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_rdma_stop_queue(&ctrl->queues[NVME_ADMIN_QID]);
 	nvme_rdma_destroy_admin_queue(ctrl, new);
 	return ret;
 }
@@ -1406,7 +1406,7 @@ static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
-	struct nvme_rdma_queue *queue = &ctrl->queues[0];
+	struct nvme_rdma_queue *queue = &ctrl->queues[NVME_ADMIN_QID];
 	struct ib_device *dev = queue->device->dev;
 	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
 	struct nvme_command *cmd = sqe->data;
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 4/6] nvme-tcp: use flag NVME_ADMIN_QID for admin queue
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
                   ` (2 preceding siblings ...)
  2019-04-10  8:00 ` [PATCH RFC 3/6] nvme-rdma: use flag " Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10  8:00 ` [PATCH RFC 5/6] nvme-fc: " Yufen Yu
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Signed-off-by: Yufen Yu <yuyufen at huawei.com>
---
 drivers/nvme/host/tcp.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 68c49dd67210..0ac59198792f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -391,7 +391,7 @@ static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
 	struct nvme_tcp_ctrl *ctrl = data;
-	struct nvme_tcp_queue *queue = &ctrl->queues[0];
+	struct nvme_tcp_queue *queue = &ctrl->queues[NVME_ADMIN_QID];
 
 	hctx->driver_data = queue;
 	return 0;
@@ -1107,7 +1107,7 @@ static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 {
-	struct nvme_tcp_queue *queue = &ctrl->queues[0];
+	struct nvme_tcp_queue *queue = &ctrl->queues[NVME_ADMIN_QID];
 	struct nvme_tcp_request *async = &ctrl->async_req;
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 
@@ -1117,7 +1117,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 	if (!async->pdu)
 		return -ENOMEM;
 
-	async->queue = &ctrl->queues[0];
+	async->queue = &ctrl->queues[NVME_ADMIN_QID];
 	return 0;
 }
 
@@ -1479,7 +1479,7 @@ static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
 	}
 
-	nvme_tcp_free_queue(ctrl, 0);
+	nvme_tcp_free_queue(ctrl, NVME_ADMIN_QID);
 }
 
 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
@@ -1520,7 +1520,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
-	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+	ret = nvme_tcp_alloc_queue(ctrl, NVME_ADMIN_QID, NVME_AQ_DEPTH);
 	if (ret)
 		return ret;
 
@@ -1531,7 +1531,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
 	return 0;
 
 out_free_queue:
-	nvme_tcp_free_queue(ctrl, 0);
+	nvme_tcp_free_queue(ctrl, NVME_ADMIN_QID);
 	return ret;
 }
 
@@ -1639,7 +1639,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 
 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
-	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_tcp_stop_queue(ctrl, NVME_ADMIN_QID);
 	if (remove) {
 		blk_cleanup_queue(ctrl->admin_q);
 		blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -1669,7 +1669,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 		}
 	}
 
-	error = nvme_tcp_start_queue(ctrl, 0);
+	error = nvme_tcp_start_queue(ctrl, NVME_ADMIN_QID);
 	if (error)
 		goto out_cleanup_queue;
 
@@ -1693,7 +1693,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 	return 0;
 
 out_stop_queue:
-	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_tcp_stop_queue(ctrl, NVME_ADMIN_QID);
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->admin_q);
@@ -1709,7 +1709,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 		bool remove)
 {
 	blk_mq_quiesce_queue(ctrl->admin_q);
-	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_tcp_stop_queue(ctrl, NVME_ADMIN_QID);
 	blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 	nvme_tcp_destroy_admin_queue(ctrl, remove);
@@ -1794,7 +1794,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 	if (ctrl->queue_count > 1)
 		nvme_tcp_destroy_io_queues(ctrl, new);
 destroy_admin:
-	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_tcp_stop_queue(ctrl, NVME_ADMIN_QID);
 	nvme_tcp_destroy_admin_queue(ctrl, new);
 	return ret;
 }
@@ -1937,7 +1937,7 @@ static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
-	struct nvme_tcp_queue *queue = &ctrl->queues[0];
+	struct nvme_tcp_queue *queue = &ctrl->queues[NVME_ADMIN_QID];
 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
 	struct nvme_command *cmd = &pdu->cmd;
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 5/6] nvme-fc: use flag NVME_ADMIN_QID for admin queue
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
                   ` (3 preceding siblings ...)
  2019-04-10  8:00 ` [PATCH RFC 4/6] nvme-tcp: " Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10 16:52   ` Heitke, Kenneth
  2019-04-10  8:00 ` [PATCH RFC 6/6] nvme-fabrics: use flag NVME_ADMIN_QID Yufen Yu
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Signed-off-by: Yufen Yu <yuyufen at huawei.com>
---
 drivers/nvme/host/fc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f3b9d91ba0df..25388e61098d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1767,7 +1767,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 
 		cmdiu = &aen_op->cmd_iu;
 		sqe = &cmdiu->sqe;
-		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
+		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[NVME_ADMIN_QID],
 				aen_op, (struct request *)NULL,
 				(NVME_AQ_BLK_MQ_DEPTH + i));
 		if (ret) {
@@ -2601,12 +2601,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * Create the admin queue
 	 */
 
-	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0,
 				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
-	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID],
 				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
@@ -2617,7 +2617,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_disconnect_admin_queue;
 
-	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[NVME_ADMIN_QID].flags);
 
 	/*
 	 * Check controller capabilities
@@ -2707,9 +2707,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	/* send a Disconnect(association) LS to fc-nvme target */
 	nvme_fc_xmt_disconnect_assoc(ctrl);
 out_delete_hw_queue:
-	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0);
 out_free_queue:
-	nvme_fc_free_queue(&ctrl->queues[0]);
+	nvme_fc_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
 	ctrl->assoc_active = false;
 	nvme_fc_ctlr_inactive_on_rport(ctrl);
 
@@ -2800,8 +2800,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 		nvme_fc_free_io_queues(ctrl);
 	}
 
-	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
-	nvme_fc_free_queue(&ctrl->queues[0]);
+	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0);
+	nvme_fc_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
 
 	/* re-enable the admin_q so anything new can fast fail */
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 6/6] nvme-fabrics: use flag NVME_ADMIN_QID
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
                   ` (4 preceding siblings ...)
  2019-04-10  8:00 ` [PATCH RFC 5/6] nvme-fc: " Yufen Yu
@ 2019-04-10  8:00 ` Yufen Yu
  2019-04-10 17:12 ` [PATCH RFC 0/6] nvme: introduce a new " Keith Busch
  2019-04-10 20:14 ` James Smart
  7 siblings, 0 replies; 13+ messages in thread
From: Yufen Yu @ 2019-04-10  8:00 UTC (permalink / raw)


Signed-off-by: Yufen Yu <yuyufen at huawei.com>
---
 drivers/nvme/host/fabrics.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index d4cb826f58ff..bd15fc616ca8 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -374,7 +374,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
-	cmd.connect.qid = 0;
+	cmd.connect.qid = NVME_ADMIN_QID;
 	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
 
 	/*
-- 
2.16.2.dirty

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RFC 5/6] nvme-fc: use flag NVME_ADMIN_QID for admin queue
  2019-04-10  8:00 ` [PATCH RFC 5/6] nvme-fc: " Yufen Yu
@ 2019-04-10 16:52   ` Heitke, Kenneth
  0 siblings, 0 replies; 13+ messages in thread
From: Heitke, Kenneth @ 2019-04-10 16:52 UTC (permalink / raw)




On 4/10/2019 2:00 AM, Yufen Yu wrote:
> Signed-off-by: Yufen Yu <yuyufen at huawei.com>
> ---
>   drivers/nvme/host/fc.c | 16 ++++++++--------
>   1 file changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index f3b9d91ba0df..25388e61098d 100644
> --- a/drivers/nvme/host/fc.c
> +++ b/drivers/nvme/host/fc.c
> @@ -1767,7 +1767,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
>   
>   		cmdiu = &aen_op->cmd_iu;
>   		sqe = &cmdiu->sqe;
> -		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
> +		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[NVME_ADMIN_QID],

This line is now over 80 characters, so you'll need to reformat it.
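
One possible way to wrap it, just as a suggestion:

		ret = __nvme_fc_init_request(ctrl,
				&ctrl->queues[NVME_ADMIN_QID], aen_op,
				(struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));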

>   				aen_op, (struct request *)NULL,
>   				(NVME_AQ_BLK_MQ_DEPTH + i));
>   		if (ret) {
> @@ -2601,12 +2601,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
>   	 * Create the admin queue
>   	 */
>   
> -	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
> +	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0,
>   				NVME_AQ_DEPTH);
>   	if (ret)
>   		goto out_free_queue;
>   
> -	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
> +	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID],
>   				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
>   	if (ret)
>   		goto out_delete_hw_queue;
> @@ -2617,7 +2617,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
>   	if (ret)
>   		goto out_disconnect_admin_queue;
>   
> -	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
> +	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[NVME_ADMIN_QID].flags);
>   
>   	/*
>   	 * Check controller capabilities
> @@ -2707,9 +2707,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
>   	/* send a Disconnect(association) LS to fc-nvme target */
>   	nvme_fc_xmt_disconnect_assoc(ctrl);
>   out_delete_hw_queue:
> -	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
> +	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0);
>   out_free_queue:
> -	nvme_fc_free_queue(&ctrl->queues[0]);
> +	nvme_fc_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
>   	ctrl->assoc_active = false;
>   	nvme_fc_ctlr_inactive_on_rport(ctrl);
>   
> @@ -2800,8 +2800,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
>   		nvme_fc_free_io_queues(ctrl);
>   	}
>   
> -	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
> -	nvme_fc_free_queue(&ctrl->queues[0]);
> +	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[NVME_ADMIN_QID], 0);
> +	nvme_fc_free_queue(&ctrl->queues[NVME_ADMIN_QID]);
>   
>   	/* re-enable the admin_q so anything new can fast fail */
>   	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue
  2019-04-10  8:00 ` [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue Yufen Yu
@ 2019-04-10 16:54   ` Heitke, Kenneth
  0 siblings, 0 replies; 13+ messages in thread
From: Heitke, Kenneth @ 2019-04-10 16:54 UTC (permalink / raw)




On 4/10/2019 2:00 AM, Yufen Yu wrote:
> Signed-off-by: Yufen Yu <yuyufen at huawei.com>
> ---
>   include/linux/nvme.h | 3 +++
>   1 file changed, 3 insertions(+)
> 
> diff --git a/include/linux/nvme.h b/include/linux/nvme.h
> index baa49e6a23cc..9f09c0b764ca 100644
> --- a/include/linux/nvme.h
> +++ b/include/linux/nvme.h
> @@ -24,6 +24,9 @@
>   
>   #define NVME_RDMA_IP_PORT	4420
>   
> +/* nvme admin queue qid */
> +#define NVME_ADMIN_QID 	0

There is a space before the tab. You should remove the space.
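
i.e. the define should end up as just:

	#define NVME_ADMIN_QID	0

with only the tab between the name and the value.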

> +
>   #define NVME_NSID_ALL		0xffffffff
>   
>   enum nvme_subsys_type {
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
                   ` (5 preceding siblings ...)
  2019-04-10  8:00 ` [PATCH RFC 6/6] nvme-fabrics: use flag NVME_ADMIN_QID Yufen Yu
@ 2019-04-10 17:12 ` Keith Busch
  2019-04-10 18:39   ` Chaitanya Kulkarni
  2019-04-10 20:14 ` James Smart
  7 siblings, 1 reply; 13+ messages in thread
From: Keith Busch @ 2019-04-10 17:12 UTC (permalink / raw)


On Wed, Apr 10, 2019 at 01:00:29AM -0700, Yufen Yu wrote:
> Hi, all
> 
> This patchset introduces a new flag, NVME_ADMIN_QID, for the admin
> queue. Currently, all drivers on the host and target side use index
> '0' for the admin queue. Using NVME_ADMIN_QID instead of a bare '0'
> makes the code easier to read and understand.
> 
> This patchset is an RFC, and it only applies NVME_ADMIN_QID to the
> drivers on the nvme host side. I am not sure whether this change
> actually makes sense. Any suggestions are welcome, and thanks in
> advance.

IMO, this makes it harder to read anything that's using the 0 index in a
loop.
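
For example, the io queue suspend loop in patch 2/6 now reads:

	for (i = dev->ctrl.queue_count - 1; i > NVME_ADMIN_QID; i--)
		nvme_suspend_queue(&dev->queues[i]);

which I find harder to parse at a glance than the plain 'i > 0'.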

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID
  2019-04-10 17:12 ` [PATCH RFC 0/6] nvme: introduce a new " Keith Busch
@ 2019-04-10 18:39   ` Chaitanya Kulkarni
  2019-04-24 16:41     ` Sagi Grimberg
  0 siblings, 1 reply; 13+ messages in thread
From: Chaitanya Kulkarni @ 2019-04-10 18:39 UTC (permalink / raw)


On 04/10/2019 10:11 AM, Keith Busch wrote:
> On Wed, Apr 10, 2019 at 01:00:29AM -0700, Yufen Yu wrote:
>> Hi, all
>>
>> This patchset introduces a new flag, NVME_ADMIN_QID, for the admin
>> queue. Currently, all drivers on the host and target side use index
>> '0' for the admin queue. Using NVME_ADMIN_QID instead of a bare '0'
>> makes the code easier to read and understand.
>>
>> This patchset is an RFC, and it only applies NVME_ADMIN_QID to the
>> drivers on the nvme host side. I am not sure whether this change
>> actually makes sense. Any suggestions are welcome, and thanks in
>> advance.
>
> IMO, this makes it harder to read anything that's using the 0 index in a
> loop.
>
Yes it does. How about using 0 in the loops and keeping the macro as
it is in the rest of the places? Or is that too confusing?
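
To make that concrete, a rough sketch of what I mean, using the pci
driver as an example:

	/* loops keep the plain index */
	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);

	/* direct admin queue references keep the macro */
	struct nvme_queue *adminq = &dev->queues[NVME_ADMIN_QID];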

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID
  2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
                   ` (6 preceding siblings ...)
  2019-04-10 17:12 ` [PATCH RFC 0/6] nvme: introduce a new " Keith Busch
@ 2019-04-10 20:14 ` James Smart
  7 siblings, 0 replies; 13+ messages in thread
From: James Smart @ 2019-04-10 20:14 UTC (permalink / raw)


On 4/10/2019 1:00 AM, Yufen Yu wrote:
> Hi, all
>
> This patchset introduces a new flag, NVME_ADMIN_QID, for the admin
> queue. Currently, all drivers on the host and target side use index
> '0' for the admin queue. Using NVME_ADMIN_QID instead of a bare '0'
> makes the code easier to read and understand.
>

Does it really clarify things that much? If so, why isn't '1'
replaced by FIRST_IO_QUEUE? And in some cases I think it actually
makes things less clear - like the looping indexes.
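
(FIRST_IO_QUEUE is hypothetical here, nothing in this series defines
it, but the suspend loop from patch 2/6 would then end up looking
something like:

	#define FIRST_IO_QUEUE	1

	for (i = dev->ctrl.queue_count - 1; i >= FIRST_IO_QUEUE; i--)
		nvme_suspend_queue(&dev->queues[i]);

which I don't think is any clearer than 'i > 0'.)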

In most of the cases, the surrounding context makes it very clear
that the code is working on an admin queue, and the nvme standards
are rather clear about queue id=0 and its meaning.

-- james

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID
  2019-04-10 18:39   ` Chaitanya Kulkarni
@ 2019-04-24 16:41     ` Sagi Grimberg
  0 siblings, 0 replies; 13+ messages in thread
From: Sagi Grimberg @ 2019-04-24 16:41 UTC (permalink / raw)



>>> Hi, all
>>>
>>> This patchset introduces a new flag, NVME_ADMIN_QID, for the admin
>>> queue. Currently, all drivers on the host and target side use index
>>> '0' for the admin queue. Using NVME_ADMIN_QID instead of a bare '0'
>>> makes the code easier to read and understand.
>>>
>>> This patchset is an RFC, and it only applies NVME_ADMIN_QID to the
>>> drivers on the nvme host side. I am not sure whether this change
>>> actually makes sense. Any suggestions are welcome, and thanks in
>>> advance.
>>
>> IMO, this makes it harder to read anything that's using the 0 index in a
>> loop.
>>
> Yes it does, how about using 0 in the loop and keeping the macro as it
> is in the rest of places ? or that is too confusing ?

Both are more confusing than what we already have IMO

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread

Thread overview: 13+ messages
2019-04-10  8:00 [PATCH RFC 0/6] nvme: introduce a new flag NVME_ADMIN_QID Yufen Yu
2019-04-10  8:00 ` [PATCH RFC 1/6] nvme: add new flag NVME_ADMIN_QID for nvme admin queue Yufen Yu
2019-04-10 16:54   ` Heitke, Kenneth
2019-04-10  8:00 ` [PATCH RFC 2/6] nvme-pci: use NVME_ADMIN_QID for " Yufen Yu
2019-04-10  8:00 ` [PATCH RFC 3/6] nvme-rdma: use flag " Yufen Yu
2019-04-10  8:00 ` [PATCH RFC 4/6] nvme-tcp: " Yufen Yu
2019-04-10  8:00 ` [PATCH RFC 5/6] nvme-fc: " Yufen Yu
2019-04-10 16:52   ` Heitke, Kenneth
2019-04-10  8:00 ` [PATCH RFC 6/6] nvme-fabrics: use flag NVME_ADMIN_QID Yufen Yu
2019-04-10 17:12 ` [PATCH RFC 0/6] nvme: introduce a new " Keith Busch
2019-04-10 18:39   ` Chaitanya Kulkarni
2019-04-24 16:41     ` Sagi Grimberg
2019-04-10 20:14 ` James Smart
