From: Max Gurtovoy <mgurtovoy@nvidia.com>
To: <linux-nvme@lists.infradead.org>, <hch@lst.de>,
	<kbusch@kernel.org>, <sagi@grimberg.me>
Cc: <chaitanyak@nvidia.com>, <israelr@nvidia.com>, <oren@nvidia.com>,
	<jsmart2021@gmail.com>, Max Gurtovoy <mgurtovoy@nvidia.com>
Subject: [PATCH 1/7] nvme: add connect_work attribute to nvme ctrl
Date: Mon, 18 Oct 2021 16:40:14 +0300
Message-ID: <20211018134020.33838-2-mgurtovoy@nvidia.com>
In-Reply-To: <20211018134020.33838-1-mgurtovoy@nvidia.com>

The connect_work delayed work is duplicated in all the fabric transports
(nvme-rdma calls it reconnect_work). Move it to the common code in
struct nvme_ctrl.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/nvme/host/fc.c   | 23 ++++++++++++-----------
 drivers/nvme/host/nvme.h |  1 +
 drivers/nvme/host/rdma.c | 10 ++++------
 drivers/nvme/host/tcp.c  |  9 ++++-----
 4 files changed, 21 insertions(+), 22 deletions(-)
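Note: the work handlers below recover the transport-specific controller
with container_of() through the embedded nvme_ctrl (e.g. the member
designator "ctrl.connect_work"), which works because offsetof() accepts
a nested member designator. A minimal userspace sketch of the idiom,
using simplified stand-in types rather than the real kernel structures:

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins for the kernel types */
struct delayed_work { int pad; };
struct nvme_ctrl { struct delayed_work connect_work; };
struct nvme_fc_ctrl {
	int cnum;
	struct nvme_ctrl ctrl;
};

/* userspace equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct nvme_fc_ctrl fc = { .cnum = 7 };
	struct delayed_work *work = &fc.ctrl.connect_work;

	/* nested member designator: recovers the outer FC ctrl
	 * from the work item embedded in the common nvme_ctrl */
	struct nvme_fc_ctrl *back =
		container_of(work, struct nvme_fc_ctrl, ctrl.connect_work);

	printf("cnum = %d\n", back->cnum);	/* prints: cnum = 7 */
	return 0;
}

This is the same pattern that lets nvme_fc_connect_ctrl_work(),
nvme_rdma_reconnect_ctrl_work() and nvme_tcp_reconnect_ctrl_work()
keep using container_of() after the member moves into nvme_ctrl.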

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa14ad963d91..4c7dffa8126e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -167,7 +167,6 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct work_struct	ioerr_work;
-	struct delayed_work	connect_work;
 
 	struct kref		ref;
 	unsigned long		flags;
@@ -567,7 +566,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 			"NVME-FC{%d}: connectivity re-established. "
 			"Attempting reconnect\n", ctrl->cnum);
 
-		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
+		queue_delayed_work(nvme_wq, &ctrl->ctrl.connect_work, 0);
 		break;
 
 	case NVME_CTRL_RESETTING:
@@ -3263,7 +3262,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
 	cancel_work_sync(&ctrl->ioerr_work);
-	cancel_delayed_work_sync(&ctrl->connect_work);
+	cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
 	/*
 	 * kill the association on the link side.  this will block
 	 * waiting for io to terminate
@@ -3300,7 +3299,8 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
 			recon_delay = rport->dev_loss_end - jiffies;
 
-		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
+		queue_delayed_work(nvme_wq, &ctrl->ctrl.connect_work,
+				   recon_delay);
 	} else {
 		if (portptr->port_state == FC_OBJSTATE_ONLINE) {
 			if (status > 0 && (status & NVME_SC_DNR))
@@ -3340,12 +3340,13 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 			"to CONNECTING\n", ctrl->cnum);
 
 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
-		if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+		if (!queue_delayed_work(nvme_wq, &ctrl->ctrl.connect_work,
+					0)) {
 			dev_err(ctrl->ctrl.device,
 				"NVME-FC{%d}: failed to schedule connect "
 				"after reset\n", ctrl->cnum);
 		} else {
-			flush_delayed_work(&ctrl->connect_work);
+			flush_delayed_work(&ctrl->ctrl.connect_work);
 		}
 	} else {
 		nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
@@ -3373,7 +3374,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 
 	struct nvme_fc_ctrl *ctrl =
 			container_of(to_delayed_work(work),
-				struct nvme_fc_ctrl, connect_work);
+				struct nvme_fc_ctrl, ctrl.connect_work);
 
 	ret = nvme_fc_create_association(ctrl);
 	if (ret)
@@ -3485,7 +3486,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	kref_init(&ctrl->ref);
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
-	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvme_fc_connect_ctrl_work);
 	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
 	spin_lock_init(&ctrl->lock);
 
@@ -3561,14 +3562,14 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		goto fail_ctrl;
 	}
 
-	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+	if (!queue_delayed_work(nvme_wq, &ctrl->ctrl.connect_work, 0)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: failed to schedule initial connect\n",
 			ctrl->cnum);
 		goto fail_ctrl;
 	}
 
-	flush_delayed_work(&ctrl->connect_work);
+	flush_delayed_work(&ctrl->ctrl.connect_work);
 
 	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3580,7 +3581,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
 	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
-	cancel_delayed_work_sync(&ctrl->connect_work);
+	cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
 
 	ctrl->ctrl.opts = NULL;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ed79a6c7e804..81ca5dd9b7f9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -343,6 +343,7 @@ struct nvme_ctrl {
 	unsigned long flags;
 #define NVME_CTRL_FAILFAST_EXPIRED	0
 	struct nvmf_ctrl_options *opts;
+	struct delayed_work connect_work;
 
 	struct page *discard_page;
 	unsigned long discard_page_busy;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0498801542eb..fbfa18a47bd8 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -110,8 +110,6 @@ struct nvme_rdma_ctrl {
 
 	struct nvme_rdma_qe	async_event_sqe;
 
-	struct delayed_work	reconnect_work;
-
 	struct list_head	list;
 
 	struct blk_mq_tag_set	admin_tag_set;
@@ -1078,7 +1076,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 	if (nvmf_should_reconnect(&ctrl->ctrl)) {
 		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
 			ctrl->ctrl.opts->reconnect_delay);
-		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
+		queue_delayed_work(nvme_wq, &ctrl->ctrl.connect_work,
 				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else {
 		nvme_delete_ctrl(&ctrl->ctrl);
@@ -1166,7 +1164,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
-			struct nvme_rdma_ctrl, reconnect_work);
+			struct nvme_rdma_ctrl, ctrl.connect_work);
 
 	++ctrl->ctrl.nr_reconnects;
 
@@ -2230,7 +2228,7 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
 	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
+	cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
 
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
@@ -2358,7 +2356,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
-	INIT_DELAYED_WORK(&ctrl->reconnect_work,
+	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
 			nvme_rdma_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3c1c29dd3020..3ace20e39c86 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -127,7 +127,6 @@ struct nvme_tcp_ctrl {
 	struct nvme_ctrl	ctrl;
 
 	struct work_struct	err_work;
-	struct delayed_work	connect_work;
 	struct nvme_tcp_request async_req;
 	u32			io_queues[HCTX_MAX_TYPES];
 };
@@ -1983,7 +1982,7 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 	if (nvmf_should_reconnect(ctrl)) {
 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
 			ctrl->opts->reconnect_delay);
-		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
+		queue_delayed_work(nvme_wq, &ctrl->connect_work,
 				ctrl->opts->reconnect_delay * HZ);
 	} else {
 		dev_info(ctrl->device, "Removing controller...\n");
@@ -2066,7 +2065,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
-			struct nvme_tcp_ctrl, connect_work);
+			struct nvme_tcp_ctrl, ctrl.connect_work);
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
 	++ctrl->nr_reconnects;
@@ -2113,7 +2112,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->admin_q);
@@ -2513,7 +2512,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
-	INIT_DELAYED_WORK(&ctrl->connect_work,
+	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
 			nvme_tcp_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
-- 
2.18.1
