* [PATCH 0/7] Few patches extracted from the centralization set
@ 2017-06-29 11:09 Sagi Grimberg
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
                   ` (7 more replies)
  0 siblings, 8 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


Feedback was that these patches make sense on their own, so here they
are as a separate set, plus a few more patches I came up with along the
way.

The repetition across the transports seems to highlight the fact that
we will benefit from centralizing the shared logic.
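
As a rough, purely illustrative sketch of the kind of shared logic this
is heading towards (the helper and the connect_queue callback below are
hypothetical and not part of this series), every fabrics transport today
open-codes a loop of roughly this shape:

static int nvme_core_connect_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	/* queue 0 is the admin queue; 1..queue_count - 1 are I/O queues */
	for (i = 1; i < ctrl->queue_count; i++) {
		/* hypothetical per-transport callback */
		ret = ctrl->ops->connect_queue(ctrl, i);
		if (ret) {
			dev_err(ctrl->device,
				"failed to connect queue %d, error %d\n",
				i, ret);
			return ret;
		}
	}
	return 0;
}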

Sagi Grimberg (7):
  nvme: move queue_count to the nvme_ctrl
  nvme: move ctrl cap to struct nvme_ctrl
  nvme-pci: rename to nvme_pci_configure_admin_queue
  nvme-fc: don't override opts->nr_io_queues
  nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting
  nvme-loop: update tagset nr_hw_queues after reconnecting/resetting
  nvme-fc: update tagset nr_hw_queues after queues reinit

 drivers/nvme/host/fc.c     | 55 ++++++++++++++++++++++++----------------------
 drivers/nvme/host/nvme.h   |  2 ++
 drivers/nvme/host/pci.c    | 37 ++++++++++++++-----------------
 drivers/nvme/host/rdma.c   | 55 +++++++++++++++++++++++++---------------------
 drivers/nvme/target/loop.c | 25 +++++++++++----------
 5 files changed, 91 insertions(+), 83 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:17   ` Johannes Thumshirn
                     ` (4 more replies)
  2017-06-29 11:09 ` [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl Sagi Grimberg
                   ` (6 subsequent siblings)
  7 siblings, 5 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


all drivers use it exactly the same, move it
to nvme_ctrl. Looking further ahead, nvme core
will really maintain it.
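
For reference, a minimal illustration of the convention queue_count
encodes (the helper below is hypothetical and not added by this patch):
queue_count includes the admin queue, so the number of I/O queues, and
hence tag_set.nr_hw_queues, is always queue_count - 1.

static inline unsigned int nvme_nr_io_queues(struct nvme_ctrl *ctrl)
{
	/* queue_count counts the admin queue as well */
	return ctrl->queue_count ? ctrl->queue_count - 1 : 0;
}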

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/host/fc.c     | 35 +++++++++++++++++------------------
 drivers/nvme/host/nvme.h   |  1 +
 drivers/nvme/host/pci.c    | 15 +++++++--------
 drivers/nvme/host/rdma.c   | 39 +++++++++++++++++++--------------------
 drivers/nvme/target/loop.c | 15 +++++++--------
 5 files changed, 51 insertions(+), 54 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index ed87214fdc0e..7eb006427caf 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -148,7 +148,6 @@ struct nvme_fc_ctrl {
 	struct device		*dev;
 	struct nvme_fc_lport	*lport;
 	struct nvme_fc_rport	*rport;
-	u32			queue_count;
 	u32			cnum;
 
 	u64			association_id;
@@ -1614,7 +1613,7 @@ nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
 		nvme_fc_free_queue(&ctrl->queues[i]);
 }
 
@@ -1635,10 +1634,10 @@ __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
 static void
 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
 {
-	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
 	int i;
 
-	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
 		__nvme_fc_delete_hw_queue(ctrl, queue, i);
 }
 
@@ -1648,7 +1647,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 	struct nvme_fc_queue *queue = &ctrl->queues[1];
 	int i, ret;
 
-	for (i = 1; i < ctrl->queue_count; i++, queue++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
 		if (ret)
 			goto delete_queues;
@@ -1667,7 +1666,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
 	int i, ret = 0;
 
-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
 					(qsize / 5));
 		if (ret)
@@ -1685,7 +1684,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
 		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
 }
 
@@ -2187,7 +2186,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		return ret;
 	}
 
-	ctrl->queue_count = opts->nr_io_queues + 1;
+	ctrl->ctrl.queue_count = opts->nr_io_queues + 1;
 	if (!opts->nr_io_queues)
 		return 0;
 
@@ -2204,7 +2203,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 						sizeof(struct scatterlist)) +
 					ctrl->lport->ops->fcprqst_priv_sz;
 	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
 	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -2258,7 +2257,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	}
 
 	/* check for io queues existing */
-	if (ctrl->queue_count == 1)
+	if (ctrl->ctrl.queue_count == 1)
 		return 0;
 
 	nvme_fc_init_io_queues(ctrl);
@@ -2381,7 +2380,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * Create the io queues
 	 */
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		if (ctrl->ctrl.state == NVME_CTRL_NEW)
 			ret = nvme_fc_create_io_queues(ctrl);
 		else
@@ -2395,7 +2394,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
 	ctrl->ctrl.nr_reconnects = 0;
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
 		nvme_queue_scan(&ctrl->ctrl);
 		nvme_queue_async_events(&ctrl->ctrl);
@@ -2447,7 +2446,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 	 * io requests back to the block layer as part of normal completions
 	 * (but with error status).
 	 */
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2702,18 +2701,18 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
-	ctrl->queue_count = min_t(unsigned int,
+	ctrl->ctrl.queue_count = min_t(unsigned int,
 				opts->nr_io_queues,
 				lport->ops->max_hw_queues);
-	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
-	ctrl->queue_count++;	/* +1 for admin queue */
+	opts->nr_io_queues = ctrl->ctrl.queue_count;	/* so opts has valid value */
+	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
 
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
-	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
-				GFP_KERNEL);
+	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
+				sizeof(struct nvme_fc_queue), GFP_KERNEL);
 	if (!ctrl->queues)
 		goto out_free_ida;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d70ff0fdd36b..6c51d92b7fab 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -142,6 +142,7 @@ struct nvme_ctrl {
 	u16 cntlid;
 
 	u32 ctrl_config;
+	u32 queue_count;
 
 	u32 page_size;
 	u32 max_hw_sectors;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 32a98e2740ad..0c83bfaa63e6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -74,7 +74,6 @@ struct nvme_dev {
 	struct device *dev;
 	struct dma_pool *prp_page_pool;
 	struct dma_pool *prp_small_pool;
-	unsigned queue_count;
 	unsigned online_queues;
 	unsigned max_qid;
 	int q_depth;
@@ -1099,9 +1098,9 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
 	int i;
 
-	for (i = dev->queue_count - 1; i >= lowest; i--) {
+	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
 		struct nvme_queue *nvmeq = dev->queues[i];
-		dev->queue_count--;
+		dev->ctrl.queue_count--;
 		dev->queues[i] = NULL;
 		nvme_free_queue(nvmeq);
 	}
@@ -1221,7 +1220,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
 	dev->queues[qid] = nvmeq;
-	dev->queue_count++;
+	dev->ctrl.queue_count++;
 
 	return nvmeq;
 
@@ -1441,7 +1440,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	unsigned i, max;
 	int ret = 0;
 
-	for (i = dev->queue_count; i <= dev->max_qid; i++) {
+	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
 		if (!nvme_alloc_queue(dev, i, dev->q_depth,
 		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
@@ -1450,7 +1449,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 		}
 	}
 
-	max = min(dev->max_qid, dev->queue_count - 1);
+	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
 		ret = nvme_create_queue(dev->queues[i], i);
 		if (ret)
@@ -1995,7 +1994,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	nvme_stop_queues(&dev->ctrl);
 
 	queues = dev->online_queues - 1;
-	for (i = dev->queue_count - 1; i > 0; i--)
+	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
 		nvme_suspend_queue(dev->queues[i]);
 
 	if (dead) {
@@ -2003,7 +2002,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		 * probe, before the admin queue is configured. Thus,
 		 * queue_count can be 0 here.
 		 */
-		if (dev->queue_count)
+		if (dev->ctrl.queue_count)
 			nvme_suspend_queue(dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6d4119dfbdaa..c4bacad3fe23 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -103,7 +103,6 @@ struct nvme_rdma_queue {
 struct nvme_rdma_ctrl {
 	/* read only in the hot path */
 	struct nvme_rdma_queue	*queues;
-	u32			queue_count;
 
 	/* other member variables */
 	struct blk_mq_tag_set	tag_set;
@@ -349,7 +348,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	struct nvme_rdma_ctrl *ctrl = data;
 	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
 
-	BUG_ON(hctx_idx >= ctrl->queue_count);
+	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
 	hctx->driver_data = queue;
 	return 0;
@@ -587,7 +586,7 @@ static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
 		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 }
 
@@ -595,7 +594,7 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
 	int i, ret = 0;
 
-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
@@ -623,14 +622,14 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	if (ret)
 		return ret;
 
-	ctrl->queue_count = nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
+	ctrl->ctrl.queue_count = nr_io_queues + 1;
+	if (ctrl->ctrl.queue_count < 2)
 		return 0;
 
 	dev_info(ctrl->ctrl.device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 					   ctrl->ctrl.opts->queue_size);
 		if (ret) {
@@ -705,7 +704,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
 	++ctrl->ctrl.nr_reconnects;
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_rdma_free_io_queues(ctrl);
 
 		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -735,7 +734,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
 	nvme_start_keep_alive(&ctrl->ctrl);
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		ret = nvme_rdma_init_io_queues(ctrl);
 		if (ret)
 			goto requeue;
@@ -749,7 +748,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	WARN_ON_ONCE(!changed);
 	ctrl->ctrl.nr_reconnects = 0;
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_queue_scan(&ctrl->ctrl);
 		nvme_queue_async_events(&ctrl->ctrl);
 	}
@@ -772,15 +771,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
-	for (i = 0; i < ctrl->queue_count; i++)
+	for (i = 0; i < ctrl->ctrl.queue_count; i++)
 		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
 
-	if (ctrl->queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 
 	/* We must take care of fastfail/requeue all our inflight requests */
-	if (ctrl->queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1)
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
@@ -1624,7 +1623,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
@@ -1716,7 +1715,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 		goto del_dead_ctrl;
 	}
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
 		if (ret)
 			goto del_dead_ctrl;
@@ -1733,7 +1732,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
 		nvme_queue_scan(&ctrl->ctrl);
 		nvme_queue_async_events(&ctrl->ctrl);
@@ -1785,7 +1784,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
 		SG_CHUNK_SIZE * sizeof(struct scatterlist);
 	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
 	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -1863,12 +1862,12 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
-	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+	ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
-	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
 				GFP_KERNEL);
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
@@ -1925,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	if (opts->nr_io_queues) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_queue_scan(&ctrl->ctrl);
 		nvme_queue_async_events(&ctrl->ctrl);
 	}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 5f55c683b338..edf0e2ab19e3 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -44,7 +44,6 @@ struct nvme_loop_iod {
 
 struct nvme_loop_ctrl {
 	struct nvme_loop_queue	*queues;
-	u32			queue_count;
 
 	struct blk_mq_tag_set	admin_tag_set;
 
@@ -241,7 +240,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	struct nvme_loop_ctrl *ctrl = data;
 	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
 
-	BUG_ON(hctx_idx >= ctrl->queue_count);
+	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
 	hctx->driver_data = queue;
 	return 0;
@@ -307,7 +306,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
 }
 
@@ -330,7 +329,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
 		if (ret)
 			goto out_destroy_queues;
 
-		ctrl->queue_count++;
+		ctrl->ctrl.queue_count++;
 	}
 
 	return 0;
@@ -344,7 +343,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 {
 	int i, ret;
 
-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			return ret;
@@ -372,7 +371,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
 	if (error)
 		return error;
-	ctrl->queue_count = 1;
+	ctrl->ctrl.queue_count = 1;
 
 	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
 	if (error)
@@ -426,7 +425,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
@@ -559,7 +558,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
 		SG_CHUNK_SIZE * sizeof(struct scatterlist);
 	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 	ctrl->ctrl.tagset = &ctrl->tag_set;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:18   ` Johannes Thumshirn
                     ` (2 more replies)
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
                   ` (5 subsequent siblings)
  7 siblings, 3 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


instead of keeping it in every single driver's controller context.

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/host/fc.c     |  8 +++-----
 drivers/nvme/host/nvme.h   |  1 +
 drivers/nvme/host/pci.c    | 18 ++++++++----------
 drivers/nvme/host/rdma.c   | 10 +++++-----
 drivers/nvme/target/loop.c |  7 +++----
 5 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7eb006427caf..2f990d979037 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -152,8 +152,6 @@ struct nvme_fc_ctrl {
 
 	u64			association_id;
 
-	u64			cap;
-
 	struct list_head	ctrl_list;	/* rport->ctrl_list */
 
 	struct blk_mq_tag_set	admin_tag_set;
@@ -2328,7 +2326,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * prior connection values
 	 */
 
-	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
 	if (ret) {
 		dev_err(ctrl->ctrl.device,
 			"prop_get NVME_REG_CAP failed\n");
@@ -2336,9 +2334,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
 
-	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
 		goto out_disconnect_admin_queue;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6c51d92b7fab..e0b83311d5de 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,7 @@ struct nvme_ctrl {
 	u32 ctrl_config;
 	u32 queue_count;
 
+	u64 cap;
 	u32 page_size;
 	u32 max_hw_sectors;
 	u16 oncs;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0c83bfaa63e6..559c5df21c2a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1144,8 +1144,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 	if (shutdown)
 		nvme_shutdown_ctrl(&dev->ctrl);
 	else
-		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
-						dev->bar + NVME_REG_CAP));
+		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_process_cq(nvmeq);
@@ -1388,7 +1387,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
 	u32 aqa;
-	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 	struct nvme_queue *nvmeq;
 
 	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
@@ -1396,13 +1394,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 		return result;
 
 	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
-						NVME_CAP_NSSRC(cap) : 0;
+				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
 
 	if (dev->subsystem &&
 	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
 		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
 
-	result = nvme_disable_ctrl(&dev->ctrl, cap);
+	result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 	if (result < 0)
 		return result;
 
@@ -1421,7 +1419,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
 	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
 
-	result = nvme_enable_ctrl(&dev->ctrl, cap);
+	result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
 	if (result)
 		return result;
 
@@ -1865,7 +1863,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 static int nvme_pci_enable(struct nvme_dev *dev)
 {
-	u64 cap;
 	int result = -ENOMEM;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
@@ -1892,10 +1889,11 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
+	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
-	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
-	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
+	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+				NVME_Q_DEPTH);
+	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
 	dev->dbs = dev->bar + 4096;
 
 	/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c4bacad3fe23..2ab0cdb4d881 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -118,7 +118,6 @@ struct nvme_rdma_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 	struct nvme_rdma_device	*device;
 
-	u64			cap;
 	u32			max_fr_pages;
 
 	struct sockaddr_storage addr;
@@ -728,7 +727,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
 	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
-	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
 		goto requeue;
 
@@ -1573,7 +1572,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 
 	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
-	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP,
+			&ctrl->ctrl.cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
 			"prop_get NVME_REG_CAP failed\n");
@@ -1581,9 +1581,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
-	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (error)
 		goto out_cleanup_queue;
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index edf0e2ab19e3..568ed8625696 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -48,7 +48,6 @@ struct nvme_loop_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 
 	struct list_head	list;
-	u64			cap;
 	struct blk_mq_tag_set	tag_set;
 	struct nvme_loop_iod	async_event_iod;
 	struct nvme_ctrl	ctrl;
@@ -387,7 +386,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
-	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
 			"prop_get NVME_REG_CAP failed\n");
@@ -395,9 +394,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
-	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (error)
 		goto out_cleanup_queue;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
  2017-06-29 11:09 ` [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:21   ` Johannes Thumshirn
                     ` (3 more replies)
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
                   ` (4 subsequent siblings)
  7 siblings, 4 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


We are going to need the nvme_configure_admin_queue name for the core routine...

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/host/pci.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 559c5df21c2a..b995bf0e0e75 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1383,7 +1383,7 @@ static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
 	return 0;
 }
 
-static int nvme_configure_admin_queue(struct nvme_dev *dev)
+static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
 	u32 aqa;
@@ -2090,7 +2090,7 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	result = nvme_configure_admin_queue(dev);
+	result = nvme_pci_configure_admin_queue(dev);
 	if (result)
 		goto out;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
                   ` (2 preceding siblings ...)
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:29   ` Johannes Thumshirn
                     ` (3 more replies)
  2017-06-29 11:09 ` [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting Sagi Grimberg
                   ` (3 subsequent siblings)
  7 siblings, 4 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


It's what the user passed in, so it's probably a better idea to keep
it intact. Also, limit the number of I/O queues to the number of
online CPUs, as blk-mq doesn't know how to handle more anyhow.

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/host/fc.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 2f990d979037..5ee821d9cb75 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2175,17 +2175,19 @@ static int
 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret) {
 		dev_info(ctrl->ctrl.device,
 			"set_queue_count failed: %d\n", ret);
 		return ret;
 	}
 
-	ctrl->ctrl.queue_count = opts->nr_io_queues + 1;
-	if (!opts->nr_io_queues)
+	ctrl->ctrl.queue_count = nr_io_queues + 1;
+	if (!nr_io_queues)
 		return 0;
 
 	nvme_fc_init_io_queues(ctrl);
@@ -2245,15 +2247,18 @@ static int
 nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret) {
 		dev_info(ctrl->ctrl.device,
 			"set_queue_count failed: %d\n", ret);
 		return ret;
 	}
 
+	ctrl->ctrl.queue_count = nr_io_queues + 1;
 	/* check for io queues existing */
 	if (ctrl->ctrl.queue_count == 1)
 		return 0;
@@ -2702,7 +2707,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->ctrl.queue_count = min_t(unsigned int,
 				opts->nr_io_queues,
 				lport->ops->max_hw_queues);
-	opts->nr_io_queues = ctrl->ctrl.queue_count;	/* so opts has valid value */
 	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
 
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
                   ` (3 preceding siblings ...)
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  2017-06-29 11:09 ` [PATCH 6/7] nvme-loop: " Sagi Grimberg
                   ` (2 subsequent siblings)
  7 siblings, 2 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


We might have more or fewer queues once we reconnect/reset, for
example due to CPUs going online/offline or controller constraints.
---
 drivers/nvme/host/rdma.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2ab0cdb4d881..cfb22531fc16 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -741,6 +741,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 		ret = nvme_rdma_connect_io_queues(ctrl);
 		if (ret)
 			goto requeue;
+
+		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+				ctrl->ctrl.queue_count - 1);
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -1727,6 +1730,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 		ret = nvme_rdma_connect_io_queues(ctrl);
 		if (ret)
 			goto del_dead_ctrl;
+
+		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+				ctrl->ctrl.queue_count - 1);
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 6/7] nvme-loop: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
                   ` (4 preceding siblings ...)
  2017-06-29 11:09 ` [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  2017-06-29 11:09 ` [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit Sagi Grimberg
  2017-06-29 16:34 ` [PATCH 0/7] Few patches extracted from the centralization set Keith Busch
  7 siblings, 2 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


We might have more or fewer queues once we reconnect/reset, for
example due to CPUs going online/offline.

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/target/loop.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 568ed8625696..3d51341e62ee 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -508,6 +508,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto out_destroy_io;
 
+	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+			ctrl->ctrl.queue_count - 1);
+
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
                   ` (5 preceding siblings ...)
  2017-06-29 11:09 ` [PATCH 6/7] nvme-loop: " Sagi Grimberg
@ 2017-06-29 11:09 ` Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  2017-06-29 16:34 ` [PATCH 0/7] Few patches extracted from the centralization set Keith Busch
  7 siblings, 2 replies; 33+ messages in thread
From: Sagi Grimberg @ 2017-06-29 11:09 UTC (permalink / raw)


We might have more or fewer queues once we reconnect/reset, for
example due to CPUs going online/offline or controller constraints.

Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
---
 drivers/nvme/host/fc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5ee821d9cb75..ffe0589969bd 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2277,6 +2277,8 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_delete_hw_queues;
 
+	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+
 	return 0;
 
 out_delete_hw_queues:
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
@ 2017-06-29 11:17   ` Johannes Thumshirn
  2017-06-29 13:33   ` Max Gurtovoy
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:17 UTC (permalink / raw)



Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl
  2017-06-29 11:09 ` [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl Sagi Grimberg
@ 2017-06-29 11:18   ` Johannes Thumshirn
  2017-06-29 13:36   ` Max Gurtovoy
  2017-06-29 14:52   ` Christoph Hellwig
  2 siblings, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:18 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
@ 2017-06-29 11:21   ` Johannes Thumshirn
  2017-06-29 13:36   ` Max Gurtovoy
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:21 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
@ 2017-06-29 11:29   ` Johannes Thumshirn
  2017-06-29 13:43   ` Max Gurtovoy
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:29 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 ` [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting Sagi Grimberg
@ 2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:31 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 N?rnberg
GF: Felix Imend?rffer, Jane Smithard, Graham Norton
HRB 21284 (AG N?rnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 6/7] nvme-loop: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 ` [PATCH 6/7] nvme-loop: " Sagi Grimberg
@ 2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:31 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 N?rnberg
GF: Felix Imend?rffer, Jane Smithard, Graham Norton
HRB 21284 (AG N?rnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit
  2017-06-29 11:09 ` [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit Sagi Grimberg
@ 2017-06-29 11:31   ` Johannes Thumshirn
  2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Johannes Thumshirn @ 2017-06-29 11:31 UTC (permalink / raw)


Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
  2017-06-29 11:17   ` Johannes Thumshirn
@ 2017-06-29 13:33   ` Max Gurtovoy
  2017-06-29 14:51   ` Christoph Hellwig
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 33+ messages in thread
From: Max Gurtovoy @ 2017-06-29 13:33 UTC (permalink / raw)


Looks good,

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl
  2017-06-29 11:09 ` [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl Sagi Grimberg
  2017-06-29 11:18   ` Johannes Thumshirn
@ 2017-06-29 13:36   ` Max Gurtovoy
  2017-06-29 14:52   ` Christoph Hellwig
  2 siblings, 0 replies; 33+ messages in thread
From: Max Gurtovoy @ 2017-06-29 13:36 UTC (permalink / raw)


Looks good,

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
  2017-06-29 11:21   ` Johannes Thumshirn
@ 2017-06-29 13:36   ` Max Gurtovoy
  2017-06-29 14:52   ` Christoph Hellwig
  2017-07-05 16:23   ` Keith Busch
  3 siblings, 0 replies; 33+ messages in thread
From: Max Gurtovoy @ 2017-06-29 13:36 UTC (permalink / raw)


Looks good,

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
  2017-06-29 11:29   ` Johannes Thumshirn
@ 2017-06-29 13:43   ` Max Gurtovoy
  2017-06-29 14:52   ` Christoph Hellwig
  2017-06-29 15:11   ` James Smart
  3 siblings, 0 replies; 33+ messages in thread
From: Max Gurtovoy @ 2017-06-29 13:43 UTC (permalink / raw)


Looks good,

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
  2017-06-29 11:17   ` Johannes Thumshirn
  2017-06-29 13:33   ` Max Gurtovoy
@ 2017-06-29 14:51   ` Christoph Hellwig
  2017-06-29 15:03   ` James Smart
  2017-07-05 16:23   ` Keith Busch
  4 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:51 UTC (permalink / raw)


On Thu, Jun 29, 2017 at 02:09:06PM +0300, Sagi Grimberg wrote:
> all drivers use it exactly the same, move it
> to nvme_ctrl. Looking further ahead, nvme core
> will really maintain it.

All transports use the queue_count in exactly the same way, so move it to
the generic struct nvme_ctrl. In the future it will also be maintained by
the core.

Otherwise this looks fine:

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl
  2017-06-29 11:09 ` [PATCH 2/7] nvme: move ctrl cap to struct nvme_ctrl Sagi Grimberg
  2017-06-29 11:18   ` Johannes Thumshirn
  2017-06-29 13:36   ` Max Gurtovoy
@ 2017-06-29 14:52   ` Christoph Hellwig
  2 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:52 UTC (permalink / raw)


changelog could use some tweaks, otherwise looks fine:

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
  2017-06-29 11:21   ` Johannes Thumshirn
  2017-06-29 13:36   ` Max Gurtovoy
@ 2017-06-29 14:52   ` Christoph Hellwig
  2017-07-05 16:23   ` Keith Busch
  3 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:52 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
  2017-06-29 11:29   ` Johannes Thumshirn
  2017-06-29 13:43   ` Max Gurtovoy
@ 2017-06-29 14:52   ` Christoph Hellwig
  2017-06-29 15:11   ` James Smart
  3 siblings, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:52 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 ` [PATCH 5/7] nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
@ 2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:53 UTC (permalink / raw)


On Thu, Jun 29, 2017 at 02:09:10PM +0300, Sagi Grimberg wrote:
> We might have more/less queues once we reconnect/reset. For
> example due to cpu going online/offline or controller constraints.

Looks good,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 6/7] nvme-loop: update tagset nr_hw_queues after reconnecting/resetting
  2017-06-29 11:09 ` [PATCH 6/7] nvme-loop: " Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
@ 2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:53 UTC (permalink / raw)


Looks good,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit
  2017-06-29 11:09 ` [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit Sagi Grimberg
  2017-06-29 11:31   ` Johannes Thumshirn
@ 2017-06-29 14:53   ` Christoph Hellwig
  1 sibling, 0 replies; 33+ messages in thread
From: Christoph Hellwig @ 2017-06-29 14:53 UTC (permalink / raw)


Looks good,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
                     ` (2 preceding siblings ...)
  2017-06-29 14:51   ` Christoph Hellwig
@ 2017-06-29 15:03   ` James Smart
  2017-07-05 16:23   ` Keith Busch
  4 siblings, 0 replies; 33+ messages in thread
From: James Smart @ 2017-06-29 15:03 UTC (permalink / raw)


On 6/29/2017 4:09 AM, Sagi Grimberg wrote:
> all drivers use it exactly the same, move it
> to nvme_ctrl. Looking further ahead, nvme core
> will really maintain it.
>
> Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
> ---
>   drivers/nvme/host/fc.c     | 35 +++++++++++++++++------------------
>   drivers/nvme/host/nvme.h   |  1 +
>   drivers/nvme/host/pci.c    | 15 +++++++--------
>   drivers/nvme/host/rdma.c   | 39 +++++++++++++++++++--------------------
>   drivers/nvme/target/loop.c | 15 +++++++--------
>   5 files changed, 51 insertions(+), 54 deletions(-)
>
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
>
Looks fine

Reviewed-By: James Smart <james.smart at broadcom.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 11:09 ` [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues Sagi Grimberg
                     ` (2 preceding siblings ...)
  2017-06-29 14:52   ` Christoph Hellwig
@ 2017-06-29 15:11   ` James Smart
  2017-07-02  8:10     ` Sagi Grimberg
  3 siblings, 1 reply; 33+ messages in thread
From: James Smart @ 2017-06-29 15:11 UTC (permalink / raw)


On 6/29/2017 4:09 AM, Sagi Grimberg wrote:
> Its what the user passed, so its probably a better
> idea to keep it intact. Also, limit the number of
> I/O queues to max online cpus as blk-mq doesnt know
> how to handle more anyhow.
>
> Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
> ---
>   drivers/nvme/host/fc.c | 14 +++++++++-----
>   1 file changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index 2f990d979037..5ee821d9cb75 100644
> --- a/drivers/nvme/host/fc.c
> +++ b/drivers/nvme/host/fc.c
> @@ -2175,17 +2175,19 @@ static int
>   nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
>   {
>   	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
> +	unsigned int nr_io_queues;
>   	int ret;
>   
> -	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
> +	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());

I had assumed the check against the online CPU count was done in
fabrics.c - but as we're talking about a later reconnect, the CPU count
may have changed, so this looks good.

However, this snippet obsoletes the "min_t(unsigned int,
opts->nr_io_queues, lport->ops->max_hw_queues)" that was done in the
next snippet.

> @@ -2702,7 +2707,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
>   	ctrl->ctrl.queue_count = min_t(unsigned int,
>   				opts->nr_io_queues,
>   				lport->ops->max_hw_queues);
> -	opts->nr_io_queues = ctrl->ctrl.queue_count;	/* so opts has valid value */
>   	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
>   
>   	ctrl->ctrl.sqsize = opts->queue_size - 1;

The simple fix is to add that min statement right after the added min
for online CPUs - in both create_io_queues and reinit_io_queues.
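
Roughly something like this (just a sketch, keeping the patch's local
variable names):

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	nr_io_queues = min_t(unsigned int, nr_io_queues,
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);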

The longer fix is to add some multiplexing code to go from blk-mq hw 
queue to fc hw queue.

-- james

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 0/7] Few patches extracted from the centralization set
  2017-06-29 11:09 [PATCH 0/7] Few patches extracted from the centralization set Sagi Grimberg
                   ` (6 preceding siblings ...)
  2017-06-29 11:09 ` [PATCH 7/7] nvme-fc: update tagset nr_hw_queues after queues reinit Sagi Grimberg
@ 2017-06-29 16:34 ` Keith Busch
  7 siblings, 0 replies; 33+ messages in thread
From: Keith Busch @ 2017-06-29 16:34 UTC (permalink / raw)


On Thu, Jun 29, 2017 at 02:09:05PM +0300, Sagi Grimberg wrote:
> Feedback was that they made sense on their own.
> Plus also few patches that I came up with.
> 
> The repetition of patches seem to highlight the fact that we
> will benefit centralization of shared logic.
> 
> Sagi Grimberg (7):
>   nvme: move queue_count to the nvme_ctrl
>   nvme: move ctrl cap to struct nvme_ctrl
>   nvme-pci: rename to nvme_pci_configure_admin_queue
>   nvme-fc: don't override opts->nr_io_queues
>   nvme-rdma: update tagset nr_hw_queues after reconnecting/resetting
>   nvme-loop: update tagset nr_hw_queues after reconnecting/resetting
>   nvme-fc: update tagset nr_hw_queues after queues reinit

All of these look fine to me.

Reviewed-by: Keith Busch <keith.busch at intel.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-06-29 15:11   ` James Smart
@ 2017-07-02  8:10     ` Sagi Grimberg
  2017-07-03 17:46       ` James Smart
  0 siblings, 1 reply; 33+ messages in thread
From: Sagi Grimberg @ 2017-07-02  8:10 UTC (permalink / raw)




On 29/06/17 18:11, James Smart wrote:
> On 6/29/2017 4:09 AM, Sagi Grimberg wrote:
>> Its what the user passed, so its probably a better
>> idea to keep it intact. Also, limit the number of
>> I/O queues to max online cpus as blk-mq doesnt know
>> how to handle more anyhow.
>>
>> Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
>> ---
>>   drivers/nvme/host/fc.c | 14 +++++++++-----
>>   1 file changed, 9 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
>> index 2f990d979037..5ee821d9cb75 100644
>> --- a/drivers/nvme/host/fc.c
>> +++ b/drivers/nvme/host/fc.c
>> @@ -2175,17 +2175,19 @@ static int
>>   nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
>>   {
>>       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
>> +    unsigned int nr_io_queues;
>>       int ret;
>> -    ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
>> +    nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
> 
> I had assumed the check vs online cpu count was done in fabrics.c - but 
> as we're talking about a later reconnect, the cpu count may have 
> changed, so this looks good.
> 
> however, the snippet obsoletes the "min_t(unsigned int, 
> opts->nr_io_queues, lport->ops->max_hw_queues)" that was done in the 
> next snippet.

Not unless we min against it again (which we could, what's your
preference?)

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 4/7] nvme-fc: don't override opts->nr_io_queues
  2017-07-02  8:10     ` Sagi Grimberg
@ 2017-07-03 17:46       ` James Smart
  0 siblings, 0 replies; 33+ messages in thread
From: James Smart @ 2017-07-03 17:46 UTC (permalink / raw)


On 7/2/2017 1:10 AM, Sagi Grimberg wrote:
>
>
> On 29/06/17 18:11, James Smart wrote:
>> On 6/29/2017 4:09 AM, Sagi Grimberg wrote:
>>> Its what the user passed, so its probably a better
>>> idea to keep it intact. Also, limit the number of
>>> I/O queues to max online cpus as blk-mq doesnt know
>>> how to handle more anyhow.
>>>
>>> Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
>>> ---
>>>   drivers/nvme/host/fc.c | 14 +++++++++-----
>>>   1 file changed, 9 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
>>> index 2f990d979037..5ee821d9cb75 100644
>>> --- a/drivers/nvme/host/fc.c
>>> +++ b/drivers/nvme/host/fc.c
>>> @@ -2175,17 +2175,19 @@ static int
>>>   nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
>>>   {
>>>       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
>>> +    unsigned int nr_io_queues;
>>>       int ret;
>>> -    ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
>>> +    nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
>>
>> I had assumed the check vs online cpu count was done in fabrics.c - 
>> but as we're talking about a later reconnect, the cpu count may have 
>> changed, so this looks good.
>>
>> however, the snippet obsoletes the "min_t(unsigned int, 
>> opts->nr_io_queues, lport->ops->max_hw_queues)" that was done in the 
>> next snippet.
>
> Not unless we min against it again (which we could, what's your
> preference?)

please add a line to min it against it again.

-- james

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 1/7] nvme: move queue_count to the nvme_ctrl
  2017-06-29 11:09 ` [PATCH 1/7] nvme: move queue_count to the nvme_ctrl Sagi Grimberg
                     ` (3 preceding siblings ...)
  2017-06-29 15:03   ` James Smart
@ 2017-07-05 16:23   ` Keith Busch
  4 siblings, 0 replies; 33+ messages in thread
From: Keith Busch @ 2017-07-05 16:23 UTC (permalink / raw)


Patch looks good.

Reviewed-by: Keith Busch <keith.busch at intel.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue
  2017-06-29 11:09 ` [PATCH 3/7] nvme-pci: rename to nvme_pci_configure_admin_queue Sagi Grimberg
                     ` (2 preceding siblings ...)
  2017-06-29 14:52   ` Christoph Hellwig
@ 2017-07-05 16:23   ` Keith Busch
  3 siblings, 0 replies; 33+ messages in thread
From: Keith Busch @ 2017-07-05 16:23 UTC (permalink / raw)


Looks fine.

Reviewed-by: Keith Busch <keith.busch at intel.com>

^ permalink raw reply	[flat|nested] 33+ messages in thread
