* [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes
@ 2017-05-16  0:10 James Smart
  2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
                   ` (10 more replies)
  0 siblings, 11 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


This set contains outstanding fixes for the nvmf fc transport.
All but the last patch have been previously posted.

v2: Revised patch 08 to use per_cpu_ptr(), per review.

patches cut against nvme-4.12

James Smart (10):
  nvme_fc: get rid of local reconnect_delay
  nvme_fc: Support ctrl_loss_tmo
  nvme_fc: replace ioabort msleep loop with completion
  nvme_fc: revise comment on teardown
  nvme_fc: set logging level on resets/deletes
  nvmet_fc: Reduce work_q count
  nvmet_fc: Add queue create and delete callbacks in LLDD api
  nvme_fc: remove extra controller reference taken on reconnect
  nvme_fcloop: fix port deletes and callbacks
  nvme_fc: correct nvme status set on abort

 drivers/nvme/host/fc.c         | 160 +++++++++++++----------------
 drivers/nvme/target/fc.c       | 224 +++++++++++++++++++++++++++++++----------
 drivers/nvme/target/fcloop.c   |  66 ++++++------
 include/linux/nvme-fc-driver.h |  18 ++++
 4 files changed, 292 insertions(+), 176 deletions(-)

-- 
2.11.0

* [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:32   ` Johannes Thumshirn
  2017-05-16 12:45   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo James Smart
                   ` (9 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Remove the local copy of reconnect_delay.
Use the value in the controller options directly.

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/fc.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index dca7165fabcf..c3ab1043efbd 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -165,7 +165,6 @@ struct nvme_fc_ctrl {
 	struct work_struct	delete_work;
 	struct work_struct	reset_work;
 	struct delayed_work	connect_work;
-	int			reconnect_delay;
 	int			connect_attempts;
 
 	struct kref		ref;
@@ -2615,9 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 
 		dev_warn(ctrl->ctrl.device,
 			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-			ctrl->cnum, ctrl->reconnect_delay);
+			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
 		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-				ctrl->reconnect_delay * HZ);
+				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
@@ -2695,9 +2694,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 
 		dev_warn(ctrl->ctrl.device,
 			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-			ctrl->cnum, ctrl->reconnect_delay);
+			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
 		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-				ctrl->reconnect_delay * HZ);
+				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: controller reconnect complete\n",
@@ -2755,7 +2754,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
 	INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-	ctrl->reconnect_delay = opts->reconnect_delay;
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
-- 
2.11.0

* [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
  2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion James Smart
                   ` (8 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Sync with Sagi's recent addition of ctrl_loss_tmo in the core fabrics
layer.

Remove the local connect limits and the connect_attempts variable.
Use the fabrics layer's new nr_reconnects variable and nvmf_should_reconnect().
Refactor the duplicated reconnect-failure code.

Addresses review comment by Sagi on controller reset support:
http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html
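
For reference, the core-fabrics helper this patch starts using boils
down to roughly the following (a sketch from memory of the fabrics
layer of this vintage; max_reconnects is derived at connect time as
DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay), with -1 meaning retry
forever -- see drivers/nvme/host/fabrics.h for the authoritative
version):

static inline bool
nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->opts->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}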

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/fc.c | 116 +++++++++++++++++++++----------------------------
 1 file changed, 49 insertions(+), 67 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index c3ab1043efbd..a0f05d5e966c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
 
 #define NVMEFC_QUEUE_DELAY	3		/* ms units */
 
-#define NVME_FC_MAX_CONNECT_ATTEMPTS	1
-
 struct nvme_fc_queue {
 	struct nvme_fc_ctrl	*ctrl;
 	struct device		*dev;
@@ -165,7 +163,6 @@ struct nvme_fc_ctrl {
 	struct work_struct	delete_work;
 	struct work_struct	reset_work;
 	struct delayed_work	connect_work;
-	int			connect_attempts;
 
 	struct kref		ref;
 	u32			flags;
@@ -2305,7 +2302,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	int ret;
 	bool changed;
 
-	ctrl->connect_attempts++;
+	++ctrl->ctrl.opts->nr_reconnects;
 
 	/*
 	 * Create the admin queue
@@ -2402,7 +2399,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	ctrl->connect_attempts = 0;
+	ctrl->ctrl.opts->nr_reconnects = 0;
 
 	kref_get(&ctrl->ctrl.kref);
 
@@ -2545,16 +2542,22 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 	nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int
-__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+static bool
+__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
 {
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
-		return -EBUSY;
+		return true;
 
 	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
-		return -EBUSY;
+		return true;
 
-	return 0;
+	return false;
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
 }
 
 /*
@@ -2580,6 +2583,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
 }
 
 static void
+nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+{
+	/* If we are resetting/deleting then do nothing */
+	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
+			ctrl->ctrl.state == NVME_CTRL_LIVE);
+		return;
+	}
+
+	dev_warn(ctrl->ctrl.device,
+		"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+		ctrl->cnum, status);
+
+	if (nvmf_should_reconnect(&ctrl->ctrl)) {
+		dev_info(ctrl->ctrl.device,
+			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
+			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
+		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+				ctrl->ctrl.opts->reconnect_delay * HZ);
+	} else {
+		dev_info(ctrl->ctrl.device,
+				"NVME-FC{%d}: Max reconnect attempts (%d) "
+				"reached. Removing controller\n",
+				ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+		WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
+	}
+}
+
+static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_fc_ctrl *ctrl =
@@ -2590,34 +2622,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 	nvme_fc_delete_association(ctrl);
 
 	ret = nvme_fc_create_association(ctrl);
-	if (ret) {
-		dev_warn(ctrl->ctrl.device,
-			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
-			ctrl->cnum, ret);
-		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-			dev_warn(ctrl->ctrl.device,
-				"NVME-FC{%d}: Max reconnect attempts (%d) "
-				"reached. Removing controller\n",
-				ctrl->cnum, ctrl->connect_attempts);
-
-			if (!nvme_change_ctrl_state(&ctrl->ctrl,
-				NVME_CTRL_DELETING)) {
-				dev_err(ctrl->ctrl.device,
-					"NVME-FC{%d}: failed to change state "
-					"to DELETING\n", ctrl->cnum);
-				return;
-			}
-
-			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-			return;
-		}
-
-		dev_warn(ctrl->ctrl.device,
-			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
-		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-				ctrl->ctrl.opts->reconnect_delay * HZ);
-	} else
+	if (ret)
+		nvme_fc_reconnect_or_delete(ctrl, ret);
+	else
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
@@ -2670,34 +2677,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 				struct nvme_fc_ctrl, connect_work);
 
 	ret = nvme_fc_create_association(ctrl);
-	if (ret) {
-		dev_warn(ctrl->ctrl.device,
-			"NVME-FC{%d}: Reconnect attempt failed (%d)\n",
-			ctrl->cnum, ret);
-		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-			dev_warn(ctrl->ctrl.device,
-				"NVME-FC{%d}: Max reconnect attempts (%d) "
-				"reached. Removing controller\n",
-				ctrl->cnum, ctrl->connect_attempts);
-
-			if (!nvme_change_ctrl_state(&ctrl->ctrl,
-				NVME_CTRL_DELETING)) {
-				dev_err(ctrl->ctrl.device,
-					"NVME-FC{%d}: failed to change state "
-					"to DELETING\n", ctrl->cnum);
-				return;
-			}
-
-			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-			return;
-		}
-
-		dev_warn(ctrl->ctrl.device,
-			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
-		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-				ctrl->ctrl.opts->reconnect_delay * HZ);
-	} else
+	if (ret)
+		nvme_fc_reconnect_or_delete(ctrl, ret);
+	else
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: controller reconnect complete\n",
 			ctrl->cnum);
@@ -2969,7 +2951,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 static struct nvmf_transport_ops nvme_fc_transport = {
 	.name		= "fc",
 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
-	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY,
+	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
 	.create_ctrl	= nvme_fc_create_ctrl,
 };
 
-- 
2.11.0

* [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
  2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
  2017-05-16  0:10 ` [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 04/10] nvme_fc: revise comment on teardown James Smart
                   ` (7 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Per the recommendation by Sagi on:
http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html

The wait for io aborts to complete is converted from an msleep
polling loop to a struct completion.
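
The pattern, as a minimal self-contained sketch (assuming a counter
guarded by a spinlock, as with ctrl->iocnt in the diff below; the
names here are illustrative only):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct abort_tracker {
	spinlock_t		lock;
	int			iocnt;
	struct completion	done;
};

/* completer side: the final decrement wakes the waiter */
static void abort_tracker_put(struct abort_tracker *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	if (!--t->iocnt)
		complete(&t->done);
	spin_unlock_irqrestore(&t->lock, flags);
}

/* waiter side: block once on the completion instead of polling */
static void abort_tracker_wait(struct abort_tracker *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	if (t->iocnt) {
		init_completion(&t->done);
		spin_unlock_irqrestore(&t->lock, flags);
		wait_for_completion(&t->done);
		return;
	}
	spin_unlock_irqrestore(&t->lock, flags);
}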

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
v2: removed unneeded inner braces
---
 drivers/nvme/host/fc.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a0f05d5e966c..d2cf4e2e560b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -167,6 +167,7 @@ struct nvme_fc_ctrl {
 	struct kref		ref;
 	u32			flags;
 	u32			iocnt;
+	struct completion	ioaborts_done;
 
 	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
 
@@ -1241,8 +1242,10 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
+		if (ctrl->flags & FCCTRL_TERMIO) {
+			if (!--ctrl->iocnt)
+				complete(&ctrl->ioaborts_done);
+		}
 	}
 	if (op->flags & FCOP_FLAGS_RELEASED)
 		complete_rq = true;
@@ -2487,10 +2490,14 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 
 	/* wait for all io that had to be aborted */
 	spin_lock_irqsave(&ctrl->lock, flags);
-	while (ctrl->iocnt) {
+	if (ctrl->iocnt) {
+		init_completion(&ctrl->ioaborts_done);
 		spin_unlock_irqrestore(&ctrl->lock, flags);
-		msleep(1000);
+
+		wait_for_completion(&ctrl->ioaborts_done);
+
 		spin_lock_irqsave(&ctrl->lock, flags);
+		WARN_ON(ctrl->iocnt);
 	}
 	ctrl->flags &= ~FCCTRL_TERMIO;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
-- 
2.11.0

* [PATCH v2 04/10] nvme_fc: revise comment on teardown
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (2 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes James Smart
                   ` (6 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Per the recommendation by Sagi on:
http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html

An extra reference was pointed out.  There's no issue with the
references themselves; the concern stemmed from a literal
interpretation of what the comment says.

Reword the comment to avoid confusion.

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/fc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d2cf4e2e560b..2708c0caee14 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2539,10 +2539,10 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 
 	/*
 	 * tear down the controller
-	 * This will result in the last reference on the nvme ctrl to
-	 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
-	 * From there, the transport will tear down it's logical queues and
-	 * association.
+	 * After the last reference on the nvme ctrl is removed,
+	 * the transport nvme_fc_nvme_ctrl_freed() callback will be
+	 * invoked. From there, the transport will tear down its
+	 * logical queues and association.
 	 */
 	nvme_uninit_ctrl(&ctrl->ctrl);
 
-- 
2.11.0

* [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (3 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 04/10] nvme_fc: revise comment on teardown James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:35   ` Johannes Thumshirn
  2017-05-16 12:47   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 06/10] nvmet_fc: Reduce work_q count James Smart
                   ` (5 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Per the review by Sagi on:
http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html

Looked at existing warn vs info vs err dev_xxx levels for the messages
printed on reconnects and deletes:
- Resets due to error and resets transitioned to deletes are dev_warn
- Other reset/disconnect messages are dev_info
- Removed chatty io queue related messages

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
v2: remove admin-requested delete info message as deletes are common
 for discovery controllers
---
 drivers/nvme/host/fc.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 2708c0caee14..8a721aa11387 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1750,7 +1750,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 	dev_warn(ctrl->ctrl.device,
 		"NVME-FC{%d}: transport association error detected: %s\n",
 		ctrl->cnum, errmsg);
-	dev_info(ctrl->ctrl.device,
+	dev_warn(ctrl->ctrl.device,
 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
 	/* stop the queues on error, cleanup is in reset thread */
@@ -2194,9 +2194,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (!opts->nr_io_queues)
 		return 0;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-			opts->nr_io_queues);
-
 	nvme_fc_init_io_queues(ctrl);
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@@ -2267,9 +2264,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ctrl->queue_count == 1)
 		return 0;
 
-	dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
-			opts->nr_io_queues);
-
 	nvme_fc_init_io_queues(ctrl);
 
 	ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -2599,7 +2593,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		return;
 	}
 
-	dev_warn(ctrl->ctrl.device,
+	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
 		ctrl->cnum, status);
 
@@ -2610,7 +2604,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
 				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else {
-		dev_info(ctrl->ctrl.device,
+		dev_warn(ctrl->ctrl.device,
 				"NVME-FC{%d}: Max reconnect attempts (%d) "
 				"reached. Removing controller\n",
 				ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
@@ -2645,7 +2639,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-	dev_warn(ctrl->ctrl.device,
+	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-- 
2.11.0

* [PATCH v2 06/10] nvmet_fc: Reduce work_q count
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (4 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:38   ` Johannes Thumshirn
  2017-05-16 12:48   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api James Smart
                   ` (4 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Instead of a work_q per controller queue, create one work_q per cpu
and have the controller queues post work elements to it.
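
The funneling pattern, reduced to a sketch (an assumed simplification
of the nvmet_fc_work_by_cpu handling in the diff below; the names are
illustrative):

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct cpu_worker {
	struct workqueue_struct	*wq;	/* one per cpu, as in the patch */
	struct list_head	todo;
	spinlock_t		lock;
	bool			running;
	struct work_struct	work;
};

static DEFINE_PER_CPU(struct cpu_worker, cpu_workers);

/*
 * Posters append to the cpu's list and only queue the work item when
 * the worker is not already running, so a burst of requests collapses
 * into a single work execution rather than one queued work per request.
 */
static void post_item(int cpu, struct list_head *item)
{
	struct cpu_worker *w = per_cpu_ptr(&cpu_workers, cpu);
	unsigned long flags;
	bool running;

	spin_lock_irqsave(&w->lock, flags);
	list_add_tail(item, &w->todo);
	running = w->running;
	w->running = true;
	spin_unlock_irqrestore(&w->lock, flags);

	if (!running)
		queue_work_on(cpu, w->wq, &w->work);
}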

Signed-off-by: James Smart <james.smart at broadcom.com>
---
v2:
converted to use DEFINE_PER_CPU()
reworked do {} while into more readable for loop in
 nvmet_fc_do_work_on_cpu()
renamed create/delete_threads to create/delete_workqueues

v3: recut on nvme-4.12

v4: use per_cpu_ptr() instead of per_cpu()
---
 drivers/nvme/target/fc.c | 205 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 154 insertions(+), 51 deletions(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 2006fae61980..93d4714e7070 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -20,6 +20,7 @@
 #include <linux/blk-mq.h>
 #include <linux/parser.h>
 #include <linux/random.h>
+#include <linux/threads.h>
 #include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
 
@@ -81,6 +82,7 @@ struct nvmet_fc_fcp_iod {
 	u32				offset;
 	enum nvmet_fcp_datadir		io_dir;
 	bool				active;
+	bool				started;
 	bool				abort;
 	bool				aborted;
 	bool				writedataactive;
@@ -88,12 +90,12 @@ struct nvmet_fc_fcp_iod {
 
 	struct nvmet_req		req;
 	struct work_struct		work;
-	struct work_struct		done_work;
 
 	struct nvmet_fc_tgtport		*tgtport;
 	struct nvmet_fc_tgt_queue	*queue;
 
-	struct list_head		fcp_list;	/* tgtport->fcp_list */
+	struct list_head		fcp_list;	/* queue->fod_list */
+	struct list_head		work_list;	/* workcpu->work_list */
 };
 
 struct nvmet_fc_tgtport {
@@ -132,7 +134,6 @@ struct nvmet_fc_tgt_queue {
 	struct nvmet_fc_tgt_assoc	*assoc;
 	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
 	struct list_head		fod_list;
-	struct workqueue_struct		*work_q;
 	struct kref			ref;
 } __aligned(sizeof(unsigned long long));
 
@@ -145,6 +146,20 @@ struct nvmet_fc_tgt_assoc {
 	struct kref			ref;
 };
 
+enum nvmet_fc_workcpu_flags {
+	NVMET_FC_CPU_RUNNING		= (1 << 0),
+	NVMET_FC_CPU_TERMINATING	= (1 << 1),
+};
+
+struct nvmet_fc_work_by_cpu {
+	struct workqueue_struct		*work_q;
+	struct list_head		fod_list;
+	spinlock_t			clock;
+	int				cpu;
+	bool				running;
+	struct work_struct		cpu_work;
+};
+
 
 static inline int
 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
@@ -213,10 +228,11 @@ static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
 static LIST_HEAD(nvmet_fc_target_list);
 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
 
+static u32 nvmet_fc_cpu_cnt;
+static DEFINE_PER_CPU(struct nvmet_fc_work_by_cpu, nvmet_fc_cpu_workcpu);
+#define nvmet_fc_workcpu(cpu)	per_cpu_ptr(&nvmet_fc_cpu_workcpu, cpu)
 
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
-static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
-static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
@@ -417,11 +433,10 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
 	int i;
 
 	for (i = 0; i < queue->sqsize; fod++, i++) {
-		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
-		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
 		fod->tgtport = tgtport;
 		fod->queue = queue;
 		fod->active = false;
+		fod->started = false;
 		fod->abort = false;
 		fod->aborted = false;
 		fod->fcpreq = NULL;
@@ -498,6 +513,7 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 	spin_lock_irqsave(&queue->qlock, flags);
 	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 	fod->active = false;
+	fod->started = false;
 	fod->abort = false;
 	fod->aborted = false;
 	fod->writedataactive = false;
@@ -556,12 +572,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	if (!nvmet_fc_tgt_a_get(assoc))
 		goto out_free_queue;
 
-	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
-				assoc->tgtport->fc_target_port.port_num,
-				assoc->a_id, qid);
-	if (!queue->work_q)
-		goto out_a_put;
-
 	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
 	queue->qid = qid;
 	queue->sqsize = sqsize;
@@ -591,8 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 
 out_fail_iodlist:
 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
-	destroy_workqueue(queue->work_q);
-out_a_put:
 	nvmet_fc_tgt_a_put(assoc);
 out_free_queue:
 	kfree(queue);
@@ -615,8 +623,6 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 
 	nvmet_fc_tgt_a_put(queue->assoc);
 
-	destroy_workqueue(queue->work_q);
-
 	kfree(queue);
 }
 
@@ -668,8 +674,6 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 	}
 	spin_unlock_irqrestore(&queue->qlock, flags);
 
-	flush_workqueue(queue->work_q);
-
 	if (disconnect)
 		nvmet_sq_destroy(&queue->nvme_sq);
 
@@ -1962,24 +1966,28 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 }
 
 static void
-nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
-{
-	struct nvmet_fc_fcp_iod *fod =
-		container_of(work, struct nvmet_fc_fcp_iod, done_work);
-
-	nvmet_fc_fod_op_done(fod);
-}
-
-static void
 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
 {
 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
 	struct nvmet_fc_tgt_queue *queue = fod->queue;
-
-	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
-		/* context switch so completion is not in ISR context */
-		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
-	else
+	struct nvmet_fc_work_by_cpu *workcpu = nvmet_fc_workcpu(queue->cpu);
+	unsigned long flags;
+	bool running;
+
+	if (fod->tgtport->ops->target_features &
+				NVMET_FCTGTFEAT_OPDONE_IN_ISR) {
+		/* context switch for processing */
+
+		spin_lock_irqsave(&workcpu->clock, flags);
+		list_add_tail(&fod->work_list, &workcpu->fod_list);
+		running = workcpu->running;
+		workcpu->running = true;
+		spin_unlock_irqrestore(&workcpu->clock, flags);
+
+		if (!running)
+			queue_work_on(workcpu->cpu, workcpu->work_q,
+					&workcpu->cpu_work);
+	} else
 		nvmet_fc_fod_op_done(fod);
 }
 
@@ -2069,6 +2077,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * layer until we have both based on csn.
 	 */
 
+	fod->started = true;
 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
 
 	fod->total_length = be32_to_cpu(cmdiu->data_len);
@@ -2144,19 +2153,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	nvmet_fc_abort_op(tgtport, fod);
 }
 
-/*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
- */
-static void
-nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
-{
-	struct nvmet_fc_fcp_iod *fod =
-		container_of(work, struct nvmet_fc_fcp_iod, work);
-	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
-
-	nvmet_fc_handle_fcp_rqst(tgtport, fod);
-}
-
 /**
  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
  *                       upon the reception of a NVME FCP CMD IU.
@@ -2186,6 +2182,9 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
 	struct nvmet_fc_tgt_queue *queue;
 	struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_work_by_cpu *workcpu;
+	unsigned long flags;
+	bool running;
 
 	/* validate iu, so the connection id can be used to find the queue */
 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2223,9 +2222,21 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
 	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
 
-	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-		queue_work_on(queue->cpu, queue->work_q, &fod->work);
-	else
+	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) {
+		/* context switch for processing */
+
+		workcpu = nvmet_fc_workcpu(queue->cpu);
+
+		spin_lock_irqsave(&workcpu->clock, flags);
+		list_add_tail(&fod->work_list, &workcpu->fod_list);
+		running = workcpu->running;
+		workcpu->running = true;
+		spin_unlock_irqrestore(&workcpu->clock, flags);
+
+		if (!running)
+			queue_work_on(workcpu->cpu, workcpu->work_q,
+					&workcpu->cpu_work);
+	} else
 		nvmet_fc_handle_fcp_rqst(tgtport, fod);
 
 	return 0;
@@ -2391,13 +2402,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
 {
 	struct nvmet_fc_tgtport *tgtport = port->priv;
 	unsigned long flags;
+	bool matched = false;
 
 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
 	if (tgtport->port == port) {
-		nvmet_fc_tgtport_put(tgtport);
+		matched = true;
 		tgtport->port = NULL;
 	}
 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	if (matched)
+		nvmet_fc_tgtport_put(tgtport);
 }
 
 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
@@ -2410,9 +2425,95 @@ static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
 	.delete_ctrl		= nvmet_fc_delete_ctrl,
 };
 
+static void
+nvmet_fc_do_work_on_cpu(struct work_struct *work)
+{
+	struct nvmet_fc_work_by_cpu *workcpu =
+		container_of(work, struct nvmet_fc_work_by_cpu, cpu_work);
+	struct nvmet_fc_fcp_iod *fod;
+	unsigned long flags;
+
+	spin_lock_irqsave(&workcpu->clock, flags);
+
+	fod = list_first_entry_or_null(&workcpu->fod_list,
+				struct nvmet_fc_fcp_iod, work_list);
+	for ( ; fod; ) {
+		list_del(&fod->work_list);
+
+		spin_unlock_irqrestore(&workcpu->clock, flags);
+
+		if (fod->started)
+			nvmet_fc_fod_op_done(fod);
+		else
+			nvmet_fc_handle_fcp_rqst(fod->tgtport, fod);
+
+		spin_lock_irqsave(&workcpu->clock, flags);
+
+		fod = list_first_entry_or_null(&workcpu->fod_list,
+					struct nvmet_fc_fcp_iod, work_list);
+	}
+
+	workcpu->running = false;
+
+	spin_unlock_irqrestore(&workcpu->clock, flags);
+}
+
+static int
+nvmet_fc_create_workqueues(void)
+{
+	struct nvmet_fc_work_by_cpu *workcpu;
+	int i;
+
+	nvmet_fc_cpu_cnt = num_active_cpus();
+	for (i = 0; i < nvmet_fc_cpu_cnt; i++) {
+		workcpu = nvmet_fc_workcpu(i);
+
+		workcpu->work_q = alloc_workqueue("nvmet-fc-cpu%d", 0, 0, i);
+		if (!workcpu->work_q)
+			return -ENOMEM;
+		workcpu->cpu = i;
+		workcpu->running = false;
+		spin_lock_init(&workcpu->clock);
+		INIT_LIST_HEAD(&workcpu->fod_list);
+		INIT_WORK(&workcpu->cpu_work, nvmet_fc_do_work_on_cpu);
+	}
+
+	return 0;
+}
+
+static void
+nvmet_fc_delete_workqueues(void)
+{
+	struct nvmet_fc_work_by_cpu *workcpu;
+	int i;
+
+	for (i = 0; i < nvmet_fc_cpu_cnt; i++) {
+		workcpu = nvmet_fc_workcpu(i);
+
+		/* sanity check - all work should be removed */
+		if (!list_empty(&workcpu->fod_list))
+			pr_warn("%s: cpu %d worklist not empty\n", __func__, i);
+
+		if (workcpu->work_q)
+			destroy_workqueue(workcpu->work_q);
+	}
+}
+
 static int __init nvmet_fc_init_module(void)
 {
-	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+	int ret;
+
+	ret = nvmet_fc_create_workqueues();
+	if (ret)
+		goto fail;
+
+	ret = nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+	if (!ret)
+		return 0;
+
+fail:
+	nvmet_fc_delete_workqueues();
+	return ret;
 }
 
 static void __exit nvmet_fc_exit_module(void)
@@ -2423,6 +2524,8 @@ static void __exit nvmet_fc_exit_module(void)
 
 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
 
+	nvmet_fc_delete_workqueues();
+
 	ida_destroy(&nvmet_fc_tgtport_cnt);
 }
 
-- 
2.11.0

* [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (5 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 06/10] nvmet_fc: Reduce work_q count James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16 12:48   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect James Smart
                   ` (3 subsequent siblings)
  10 siblings, 1 reply; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


To facilitate LLDD resource management on nvme queue creation and
deletion, add optional callbacks queue_create and queue_delete.
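
A hypothetical LLDD-side sketch of the new entrypoints (the example_*
names and resource helpers are invented for illustration and are not
part of any real driver; tgtport->private is the LLDD's context as
defined in nvme-fc-driver.h):

static int
example_queue_create(struct nvmet_fc_target_port *tgtport,
			u64 connection_id, u16 qsize)
{
	struct example_port *port = tgtport->private;

	/* reserve hardware contexts sized for this nvme queue */
	return example_reserve_queue(port, connection_id, qsize);
}

static void
example_queue_delete(struct nvmet_fc_target_port *tgtport,
			u64 connection_id, u16 qsize)
{
	struct example_port *port = tgtport->private;

	example_release_queue(port, connection_id, qsize);
}

static struct nvmet_fc_target_template example_tgt_template = {
	/* mandatory entrypoints, queue counts, etc. elided */
	.queue_create	= example_queue_create,
	.queue_delete	= example_queue_delete,	/* required once queue_create is set */
};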

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
v2: require queue_delete if queue_create entrypoint present
v3: recut on nvme-4.12
---
 drivers/nvme/target/fc.c       | 19 ++++++++++++++++++-
 include/linux/nvme-fc-driver.h | 18 ++++++++++++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 93d4714e7070..43701c590bcd 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -556,6 +556,7 @@ static struct nvmet_fc_tgt_queue *
 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 			u16 qid, u16 sqsize)
 {
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 	struct nvmet_fc_tgt_queue *queue;
 	unsigned long flags;
 	int ret;
@@ -569,8 +570,14 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	if (!queue)
 		return NULL;
 
+	if (tgtport->ops->queue_create) {
+		if (tgtport->ops->queue_create(&tgtport->fc_target_port,
+				nvmet_fc_makeconnid(assoc, qid), sqsize))
+			goto out_free_queue;
+	}
+
 	if (!nvmet_fc_tgt_a_get(assoc))
-		goto out_free_queue;
+		goto out_queue_delete;
 
 	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
 	queue->qid = qid;
@@ -602,6 +609,10 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 out_fail_iodlist:
 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
 	nvmet_fc_tgt_a_put(assoc);
+out_queue_delete:
+	if (tgtport->ops->queue_delete)
+		tgtport->ops->queue_delete(&tgtport->fc_target_port,
+				nvmet_fc_makeconnid(assoc, qid), sqsize);
 out_free_queue:
 	kfree(queue);
 	return NULL;
@@ -677,6 +688,11 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 	if (disconnect)
 		nvmet_sq_destroy(&queue->nvme_sq);
 
+	if (tgtport->ops->queue_delete)
+		tgtport->ops->queue_delete(&tgtport->fc_target_port,
+				nvmet_fc_makeconnid(queue->assoc, queue->qid),
+				queue->sqsize);
+
 	nvmet_fc_tgt_q_put(queue);
 }
 
@@ -863,6 +879,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	if (!template->xmt_ls_rsp || !template->fcp_op ||
 	    !template->fcp_abort ||
 	    !template->fcp_req_release || !template->targetport_delete ||
+	    (template->queue_create && !template->queue_delete) ||
 	    !template->max_hw_queues || !template->max_sgl_segments ||
 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
 		ret = -EINVAL;
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..492e9f402223 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -806,6 +806,20 @@ struct nvmet_fc_target_port {
  *       to the LLDD after all operations on the fcp operation are complete.
  *       This may be due to the command completing or upon completion of
  *       abort cleanup.
+ *       Entrypoint is Mandatory.
+ *
+ * @queue_create:  Called by the transport to indicate the creation of an
+ *       nvme queue and to allow the LLDD to allocate resources for the
+ *       queue.
+ *       Returns 0 on successful allocation of resources for the queue.
+ *       -<errno> on failure.  Failure will result in failure of the
+ *       FC-NVME Create Association or Create Connection LS's.
+ *       Entrypoint is Optional.
+ *
+ * @queue_delete:  Called by the transport to indicate the deletion of an
+ *       nvme queue and to allow the LLDD to de-allocate resources for the
+ *       queue.
+ *       Entrypoint is Optional.
  *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
@@ -846,6 +860,10 @@ struct nvmet_fc_target_template {
 				struct nvmefc_tgt_fcp_req *fcpreq);
 	void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
 				struct nvmefc_tgt_fcp_req *fcpreq);
+	int (*queue_create)(struct nvmet_fc_target_port *tgtport,
+				u64 connection_id, u16 qsize);
+	void (*queue_delete)(struct nvmet_fc_target_port *tgtport,
+				u64 connection_id, u16 qsize);
 
 	u32	max_hw_queues;
 	u16	max_sgl_segments;
-- 
2.11.0

* [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (6 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:40   ` Johannes Thumshirn
  2017-05-16 12:49   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks James Smart
                   ` (2 subsequent siblings)
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Fix an extra controller reference taken on reconnect by moving the
reference to the initial controller create.

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
v2: per review: moved the location where the ref is taken
---
 drivers/nvme/host/fc.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 8a721aa11387..78973407430a 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2398,8 +2398,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
 	ctrl->ctrl.opts->nr_reconnects = 0;
 
-	kref_get(&ctrl->ctrl.kref);
-
 	if (ctrl->queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
 		nvme_queue_scan(&ctrl->ctrl);
@@ -2800,7 +2798,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		ctrl->ctrl.opts = NULL;
 		/* initiate nvme ctrl ref counting teardown */
 		nvme_uninit_ctrl(&ctrl->ctrl);
-		nvme_put_ctrl(&ctrl->ctrl);
 
 		/* as we're past the point where we transition to the ref
 		 * counting teardown path, if we return a bad pointer here,
@@ -2816,6 +2813,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		return ERR_PTR(ret);
 	}
 
+	kref_get(&ctrl->ctrl.kref);
+
 	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
 		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
-- 
2.11.0

* [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (7 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:41   ` Johannes Thumshirn
  2017-05-16 12:50   ` Christoph Hellwig
  2017-05-16  0:10 ` [PATCH v2 10/10] nvme_fc: correct nvme status set on abort James Smart
  2017-05-22 18:55 ` [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes Christoph Hellwig
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Now that there are potentially long delays between when a
remoteport or targetport delete call is made and when the
callback occurs (dev_loss_tmo timeout), no longer block in the
delete routines and move the final nport puts to the callbacks.

Moved the fcloop_nport_get/put/free routines to avoid forward
declarations.

Ensure port_info structs used in registrations are nulled in
case fields are not set (ex: devloss_tmo values).
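
In sketch form, the remoteport side of the change: the delete path
previously unregistered the port, blocked in
wait_for_completion(&nport->rport_unreg_done) -- now potentially a
full dev_loss_tmo -- and only then dropped its reference. The final
put instead moves into the LLDD callback, so the delete path returns
immediately (as in the diff below):

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);

	fcloop_nport_put(rport->nport);
}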

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
v2: cut on nvme-4.12
---
 drivers/nvme/target/fcloop.c | 66 +++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 34 deletions(-)

diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 294a6611fb24..6a9dfbd87574 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -636,6 +636,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 }
 
 static void
+fcloop_nport_free(struct kref *ref)
+{
+	struct fcloop_nport *nport =
+		container_of(ref, struct fcloop_nport, ref);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+	list_del(&nport->nport_list);
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+	kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+	return kref_get_unless_zero(&nport->ref);
+}
+
+static void
 fcloop_localport_delete(struct nvme_fc_local_port *localport)
 {
 	struct fcloop_lport *lport = localport->private;
@@ -651,6 +677,8 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 
 	/* release any threads waiting for the unreg to complete */
 	complete(&rport->nport->rport_unreg_done);
+
+	fcloop_nport_put(rport->nport);
 }
 
 static void
@@ -660,6 +688,8 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 
 	/* release any threads waiting for the unreg to complete */
 	complete(&tport->nport->tport_unreg_done);
+
+	fcloop_nport_put(tport->nport);
 }
 
 #define	FCLOOP_HW_QUEUES		4
@@ -727,6 +757,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 		goto out_free_opts;
 	}
 
+	memset(&pinfo, 0, sizeof(pinfo));
 	pinfo.node_name = opts->wwnn;
 	pinfo.port_name = opts->wwpn;
 	pinfo.port_role = opts->roles;
@@ -809,32 +840,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
 	return ret ? ret : count;
 }
 
-static void
-fcloop_nport_free(struct kref *ref)
-{
-	struct fcloop_nport *nport =
-		container_of(ref, struct fcloop_nport, ref);
-	unsigned long flags;
-
-	spin_lock_irqsave(&fcloop_lock, flags);
-	list_del(&nport->nport_list);
-	spin_unlock_irqrestore(&fcloop_lock, flags);
-
-	kfree(nport);
-}
-
-static void
-fcloop_nport_put(struct fcloop_nport *nport)
-{
-	kref_put(&nport->ref, fcloop_nport_free);
-}
-
-static int
-fcloop_nport_get(struct fcloop_nport *nport)
-{
-	return kref_get_unless_zero(&nport->ref);
-}
-
 static struct fcloop_nport *
 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
 {
@@ -943,6 +948,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
 	if (!nport)
 		return -EIO;
 
+	memset(&pinfo, 0, sizeof(pinfo));
 	pinfo.node_name = nport->node_name;
 	pinfo.port_name = nport->port_name;
 	pinfo.port_role = nport->port_role;
@@ -997,10 +1003,6 @@ __wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
 	if (ret)
 		return ret;
 
-	wait_for_completion(&nport->rport_unreg_done);
-
-	fcloop_nport_put(nport);
-
 	return ret;
 }
 
@@ -1104,10 +1106,6 @@ __wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
 	if (ret)
 		return ret;
 
-	wait_for_completion(&nport->tport_unreg_done);
-
-	fcloop_nport_put(nport);
-
 	return ret;
 }
 
-- 
2.11.0

* [PATCH v2 10/10] nvme_fc: correct nvme status set on abort
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (8 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks James Smart
@ 2017-05-16  0:10 ` James Smart
  2017-05-16  7:42   ` Johannes Thumshirn
  2017-05-16 12:51   ` Christoph Hellwig
  2017-05-22 18:55 ` [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes Christoph Hellwig
  10 siblings, 2 replies; 32+ messages in thread
From: James Smart @ 2017-05-16  0:10 UTC (permalink / raw)


Correct the nvme status set on abort. The patch that changed the status
to be an actual nvme status code crossed in the night with the patch
that added the abort values.
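
For context on the shift (my reading of the CQE layout, not spelled
out in the patch): in an NVMe completion queue entry the 16-bit
status word carries the phase tag in bit 0, so the status code proper
occupies bits 15:1 and the unshifted NVME_SC_* constants have to be
moved up by one when synthesizing a CQE-form status:

	__le16 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);

	if (blk_queue_dying(rq->q))
		status |= cpu_to_le16(NVME_SC_DNR << 1);	/* DNR lands in bit 15 */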

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/fc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 78973407430a..8dd06e97736d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1375,9 +1375,9 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
 	if (!complete_rq) {
 		if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-			status = cpu_to_le16(NVME_SC_ABORT_REQ);
+			status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
 			if (blk_queue_dying(rq->q))
-				status |= cpu_to_le16(NVME_SC_DNR);
+				status |= cpu_to_le16(NVME_SC_DNR << 1);
 		}
 		nvme_end_request(rq, status, result);
 	} else
-- 
2.11.0

* [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay
  2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
@ 2017-05-16  7:32   ` Johannes Thumshirn
  2017-05-16 12:45   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:32 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Remove the local copy of reconnect_delay.
> Use the value in the controller options directly.
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>


-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo
  2017-05-16  0:10 ` [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo James Smart
@ 2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:33 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Sync with Sagi's recent addition of ctrl_loss_tmo in the core fabrics
> layer.
> 
> Remove local connect limits and connect_attempts variable.
> Use fabrics new nr_connects variable and use of nvmf_should_reconnect()
> Refactor duplicate reconnect failure code.
> 
> Addresses review comment by Sagi on controller reset support:
> http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>


-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion
  2017-05-16  0:10 ` [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion James Smart
@ 2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:33 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Per the recommendation by Sagi on:
> http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html
> 
> Wait for io aborts to complete wait converted from msleep look to
> using a struct completion.
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>


-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 04/10] nvme_fc: revise comment on teardown
  2017-05-16  0:10 ` [PATCH v2 04/10] nvme_fc: revise comment on teardown James Smart
@ 2017-05-16  7:33   ` Johannes Thumshirn
  2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:33 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Per the recommendation by Sagi on:
> http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html
> 
> An extra reference was pointed out.  There's no issue with the
> references, but rather a literal interpretation of what the comment
> is saying.
> 
> Reword the comment to avoid confusion.
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>


-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes
  2017-05-16  0:10 ` [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes James Smart
@ 2017-05-16  7:35   ` Johannes Thumshirn
  2017-05-16 12:47   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:35 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Per the review by Sagi on:
> http://lists.infradead.org/pipermail/linux-nvme/2017-April/009261.html
> 
> Looked at existing warn vs info vs err dev_xxx levels for the messages
> printed on reconnects and deletes:
> - Resets due to error and resets transitioned to deletes are dev_warn
> - Other reset/disconnect messages are dev_info
> - Removed chatty io queue related messages
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 06/10] nvmet_fc: Reduce work_q count
  2017-05-16  0:10 ` [PATCH v2 06/10] nvmet_fc: Reduce work_q count James Smart
@ 2017-05-16  7:38   ` Johannes Thumshirn
  2017-05-16 12:48   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:38 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Instead of a work_q per controller queue, make 1 per cpu and have
> controller queues post work elements to the work_q.
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect
  2017-05-16  0:10 ` [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect James Smart
@ 2017-05-16  7:40   ` Johannes Thumshirn
  2017-05-16 12:49   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:40 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> fix extra controller reference taken on reconnect by moving
> reference to initial controller create
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks
  2017-05-16  0:10 ` [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks James Smart
@ 2017-05-16  7:41   ` Johannes Thumshirn
  2017-05-16 12:50   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:41 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> Now that there are potentially long delays between when a
> remoteport or targetport delete calls is made and when the
> callback occurs (dev_loss_tmo timeout), no longer block in the
> delete routines and move the final nport puts to the callbacks.
> 
> Moved the fcloop_nport_get/put/free routines to avoid forward
> declarations.
> 
> Ensure port_info structs used in registrations are nulled in
> case fields are not set (ex: devloss_tmo values).
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 10/10] nvme_fc: correct nvme status set on abort
  2017-05-16  0:10 ` [PATCH v2 10/10] nvme_fc: correct nvme status set on abort James Smart
@ 2017-05-16  7:42   ` Johannes Thumshirn
  2017-05-16 12:51   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Johannes Thumshirn @ 2017-05-16  7:42 UTC (permalink / raw)


On 05/16/2017 02:10 AM, James Smart wrote:
> correct nvme status set on abort. Patch that changed status to being actual
> nvme status crossed in the night with the patch that added abort values.
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---

Thanks,
Reviewed-by: Johannes Thumshirn <jthumshirn at suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn at suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay
  2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
  2017-05-16  7:32   ` Johannes Thumshirn
@ 2017-05-16 12:45   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:45 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

* [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo
  2017-05-16  0:10 ` [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
@ 2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:46 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

* [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion
  2017-05-16  0:10 ` [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
@ 2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:46 UTC (permalink / raw)


As said before - please always use a waitqueue if you wait for a
specific condition.
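
Roughly, with a wait_queue_head_t added to the ctrl (a sketch of the
suggestion, not of any posted patch; the ioabort_wait name is
illustrative):

	/* completer, under ctrl->lock, after the last decrement: */
	if (!--ctrl->iocnt)
		wake_up(&ctrl->ioabort_wait);

	/* waiter: the condition is spelled out at the wait site */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, !ctrl->iocnt, ctrl->lock);
	spin_unlock_irq(&ctrl->lock);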

* [PATCH v2 04/10] nvme_fc: revise comment on teardown
  2017-05-16  0:10 ` [PATCH v2 04/10] nvme_fc: revise comment on teardown James Smart
  2017-05-16  7:33   ` Johannes Thumshirn
@ 2017-05-16 12:46   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:46 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes
  2017-05-16  0:10 ` [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes James Smart
  2017-05-16  7:35   ` Johannes Thumshirn
@ 2017-05-16 12:47   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:47 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 06/10] nvmet_fc: Reduce work_q count
  2017-05-16  0:10 ` [PATCH v2 06/10] nvmet_fc: Reduce work_q count James Smart
  2017-05-16  7:38   ` Johannes Thumshirn
@ 2017-05-16 12:48   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:48 UTC (permalink / raw)


On Mon, May 15, 2017 at 05:10:20PM -0700, James Smart wrote:
> Instead of a work_q per controller queue, make one per cpu and have
> the controller queues post work elements to it.

Having per-cpu workqueues doesn't make any sense at all as workqueues
already are per-cpu underneath.  Please use a global workqueue
instead.  That also avoids the sort of messy interactions with
CPU hotplug that would have to be added to this patch otherwise.
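
Something along these lines (untested sketch; the queue name and the
work item reference are made up for illustration):

	static struct workqueue_struct *nvmet_fc_wq;

	/* module init: the workqueue core already spreads execution
	 * across per-cpu worker pools, so a single queue suffices */
	nvmet_fc_wq = alloc_workqueue("nvmet_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_fc_wq)
		return -ENOMEM;

	/* submission path: controller queues post to the shared queue */
	queue_work(nvmet_fc_wq, &fod->work);

	/* module exit */
	destroy_workqueue(nvmet_fc_wq);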

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api
  2017-05-16  0:10 ` [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api James Smart
@ 2017-05-16 12:48   ` Christoph Hellwig
  2017-05-22 21:53     ` James Smart
  0 siblings, 1 reply; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:48 UTC (permalink / raw)


On Mon, May 15, 2017 at 05:10:21PM -0700, James Smart wrote:
> To facilitate LLDD resource management on nvme queue creation and
> deletion, add optional callbacks queue_create and queue_delete.

Please send these together with driver changes to implement them,
otherwise they are impossible to review.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect
  2017-05-16  0:10 ` [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect James Smart
  2017-05-16  7:40   ` Johannes Thumshirn
@ 2017-05-16 12:49   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:49 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks
  2017-05-16  0:10 ` [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks James Smart
  2017-05-16  7:41   ` Johannes Thumshirn
@ 2017-05-16 12:50   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:50 UTC (permalink / raw)


On Mon, May 15, 2017 at 05:10:23PM -0700, James Smart wrote:
> Now that there are potentially long delays between when a
> remoteport or targetport delete call is made and when the
> callback occurs (dev_loss_tmo timeout), no longer block in the
> delete routines and move the final nport puts to the callbacks.
> 
> Move the fcloop_nport_get/put/free routines earlier to avoid
> forward declarations.
> 
> Ensure the port_info structs used in registrations are zeroed in
> case fields are not set (e.g. devloss_tmo values).
> 
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> ---
> v2: cut on nvme-4.12
> ---
>  drivers/nvme/target/fcloop.c | 66 +++++++++++++++++++++-----------------------
>  1 file changed, 32 insertions(+), 34 deletions(-)
> 
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 294a6611fb24..6a9dfbd87574 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -636,6 +636,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
>  }
>  
>  static void
> +fcloop_nport_free(struct kref *ref)
> +{
> +	struct fcloop_nport *nport =
> +		container_of(ref, struct fcloop_nport, ref);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&fcloop_lock, flags);
> +	list_del(&nport->nport_list);
> +	spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> +	kfree(nport);
> +}
> +
> +static void
> +fcloop_nport_put(struct fcloop_nport *nport)
> +{
> +	kref_put(&nport->ref, fcloop_nport_free);
> +}
> +
> +static int
> +fcloop_nport_get(struct fcloop_nport *nport)
> +{
> +	return kref_get_unless_zero(&nport->ref);
> +}
> +
> +static void
>  fcloop_localport_delete(struct nvme_fc_local_port *localport)
>  {
>  	struct fcloop_lport *lport = localport->private;
> @@ -651,6 +677,8 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
>  
>  	/* release any threads waiting for the unreg to complete */
>  	complete(&rport->nport->rport_unreg_done);
> +
> +	fcloop_nport_put(rport->nport);
>  }
>  
>  static void
> @@ -660,6 +688,8 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
>  
>  	/* release any threads waiting for the unreg to complete */
>  	complete(&tport->nport->tport_unreg_done);
> +
> +	fcloop_nport_put(tport->nport);
>  }
>  
>  #define	FCLOOP_HW_QUEUES		4
> @@ -727,6 +757,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
>  		goto out_free_opts;
>  	}
>  
> +	memset(&pinfo, 0, sizeof(pinfo));
>  	pinfo.node_name = opts->wwnn;
>  	pinfo.port_name = opts->wwpn;
>  	pinfo.port_role = opts->roles;
> @@ -809,32 +840,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
>  	return ret ? ret : count;
>  }
>  
> -static void
> -fcloop_nport_free(struct kref *ref)
> -{
> -	struct fcloop_nport *nport =
> -		container_of(ref, struct fcloop_nport, ref);
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&fcloop_lock, flags);
> -	list_del(&nport->nport_list);
> -	spin_unlock_irqrestore(&fcloop_lock, flags);
> -
> -	kfree(nport);
> -}
> -
> -static void
> -fcloop_nport_put(struct fcloop_nport *nport)
> -{
> -	kref_put(&nport->ref, fcloop_nport_free);
> -}
> -
> -static int
> -fcloop_nport_get(struct fcloop_nport *nport)
> -{
> -	return kref_get_unless_zero(&nport->ref);
> -}
> -
>  static struct fcloop_nport *
>  fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
>  {
> @@ -943,6 +948,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
>  	if (!nport)
>  		return -EIO;
>  
> +	memset(&pinfo, 0, sizeof(pinfo));
>  	pinfo.node_name = nport->node_name;
>  	pinfo.port_name = nport->port_name;
>  	pinfo.port_role = nport->port_role;
> @@ -997,10 +1003,6 @@ __wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
>  	if (ret)
>  		return ret;
>  
> -	wait_for_completion(&nport->rport_unreg_done);

As far as I can tell no one will be waiting for this completion
and it could be removed now.  Or did I miss something in another
patch?

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 10/10] nvme_fc: correct nvme status set on abort
  2017-05-16  0:10 ` [PATCH v2 10/10] nvme_fc: correct nvme status set on abort James Smart
  2017-05-16  7:42   ` Johannes Thumshirn
@ 2017-05-16 12:51   ` Christoph Hellwig
  1 sibling, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-16 12:51 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes
  2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
                   ` (9 preceding siblings ...)
  2017-05-16  0:10 ` [PATCH v2 10/10] nvme_fc: correct nvme status set on abort James Smart
@ 2017-05-22 18:55 ` Christoph Hellwig
  10 siblings, 0 replies; 32+ messages in thread
From: Christoph Hellwig @ 2017-05-22 18:55 UTC (permalink / raw)


Applied 1,2,4,5,8,10 to nvme-4.12.  Please resend the rest per the
review comments.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api
  2017-05-16 12:48   ` Christoph Hellwig
@ 2017-05-22 21:53     ` James Smart
  0 siblings, 0 replies; 32+ messages in thread
From: James Smart @ 2017-05-22 21:53 UTC (permalink / raw)


On 5/16/2017 5:48 AM, Christoph Hellwig wrote:
> On Mon, May 15, 2017 at 05:10:21PM -0700, James Smart wrote:
>> To facilitate LLDD resource management on nvme queue creation and
>> deletion, add optional callbacks queue_create and queue_delete.
>
> Please send these together with driver changes to implement them,
> otherwise they are impossible to review.
>

Christoph,

The API was put in to support lpfc sizing its async receive queues. The 
idea was to avoid having the LLDD snoop the LS payloads; instead, the 
transport calls the LLDD when an LS creates or deletes a queue.

It turned out better for lpfc to grow its pools dynamically based on 
demand rather than on queue sizing, so lpfc moved away from the API. I 
continued to post it, thinking it might be useful for another LLDD 
implementation, one that actually creates hw queues mapped 1:1 to the 
nvme queues.
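
For the record, the shape was roughly (sketch from memory; the exact 
signatures here are not authoritative):

	/* optional entry points in struct nvmet_fc_target_template */
	int	(*queue_create)(struct nvmet_fc_target_port *tgtport,
				u16 qid, u16 sqsize);
	void	(*queue_delete)(struct nvmet_fc_target_port *tgtport,
				u16 qid);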

Given there are no current consumers, I've removed it from the next posting.

-- james

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2017-05-22 21:53 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-16  0:10 [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes James Smart
2017-05-16  0:10 ` [PATCH v2 01/10] nvme_fc: get rid of local reconnect_delay James Smart
2017-05-16  7:32   ` Johannes Thumshirn
2017-05-16 12:45   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 02/10] nvme_fc: Support ctrl_loss_tmo James Smart
2017-05-16  7:33   ` Johannes Thumshirn
2017-05-16 12:46   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 03/10] nvme_fc: replace ioabort msleep loop with completion James Smart
2017-05-16  7:33   ` Johannes Thumshirn
2017-05-16 12:46   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 04/10] nvme_fc: revise comment on teardown James Smart
2017-05-16  7:33   ` Johannes Thumshirn
2017-05-16 12:46   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 05/10] nvme_fc: set logging level on resets/deletes James Smart
2017-05-16  7:35   ` Johannes Thumshirn
2017-05-16 12:47   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 06/10] nvmet_fc: Reduce work_q count James Smart
2017-05-16  7:38   ` Johannes Thumshirn
2017-05-16 12:48   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 07/10] nvmet_fc: Add queue create and delete callbacks in LLDD api James Smart
2017-05-16 12:48   ` Christoph Hellwig
2017-05-22 21:53     ` James Smart
2017-05-16  0:10 ` [PATCH v2 08/10] nvme_fc: remove extra controller reference taken on reconnect James Smart
2017-05-16  7:40   ` Johannes Thumshirn
2017-05-16 12:49   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 09/10] nvme_fcloop: fix port deletes and callbacks James Smart
2017-05-16  7:41   ` Johannes Thumshirn
2017-05-16 12:50   ` Christoph Hellwig
2017-05-16  0:10 ` [PATCH v2 10/10] nvme_fc: correct nvme status set on abort James Smart
2017-05-16  7:42   ` Johannes Thumshirn
2017-05-16 12:51   ` Christoph Hellwig
2017-05-22 18:55 ` [PATCH v2 00/10] outstanding nvme_fc/nvmet_fc fixes Christoph Hellwig
