* [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Sagi Grimberg @ 2022-03-21 11:57 UTC
  To: linux-nvme, Christoph Hellwig, Keith Busch, Chaitanya Kulkarni
  Cc: James Smart, Max Gurtovoy

Any attempt to flush the kernel-global workqueues can deadlock, so
we should simply stop using them. Instead, introduce nvmet_wq, a
generic nvmet workqueue for work elements that don't explicitly
require a dedicated workqueue (as evidenced by the mere fact that
they currently use the system_wq).
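
For illustration, a minimal sketch (hypothetical code, not from this
patch) of how flushing the kernel-global workqueue can deadlock:

#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(teardown_lock);

static void example_work_fn(struct work_struct *w)
{
	/* never succeeds: the flusher below holds the lock */
	mutex_lock(&teardown_lock);
	mutex_unlock(&teardown_lock);
}
static DECLARE_WORK(example_work, example_work_fn);

static void example_teardown(void)
{
	mutex_lock(&teardown_lock);
	schedule_work(&example_work);
	/* waits on example_work, which waits on us: deadlock */
	flush_scheduled_work();
	mutex_unlock(&teardown_lock);
}

Worse, flush_scheduled_work() waits for *every* work item on the
system_wq, so the flusher can also deadlock against work queued by
any other subsystem, not just its own. A private nvmet_wq limits
the flush to nvmet's own work items.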

Changes were done using the following replacements:
s/schedule_work(/queue_work(nvmet_wq, /g
s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g
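
These correspond to a mechanical pass over the target sources, e.g.
with a sed invocation along these lines (hypothetical, shown only
for reference):

sed -i -e 's/schedule_work(/queue_work(nvmet_wq, /g' \
       -e 's/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g' \
       -e 's/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g' \
       drivers/nvme/target/*.c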

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
 drivers/nvme/target/admin-cmd.c     |  2 +-
 drivers/nvme/target/configfs.c      |  2 +-
 drivers/nvme/target/core.c          | 22 ++++++++++++++++------
 drivers/nvme/target/fc.c            |  8 ++++----
 drivers/nvme/target/fcloop.c        | 16 ++++++++--------
 drivers/nvme/target/io-cmd-file.c   |  6 +++---
 drivers/nvme/target/loop.c          |  4 ++--
 drivers/nvme/target/modules.builtin |  0
 drivers/nvme/target/nvmet.h         |  1 +
 drivers/nvme/target/passthru.c      |  2 +-
 drivers/nvme/target/rdma.c          | 12 ++++++------
 drivers/nvme/target/tcp.c           | 10 +++++-----
 12 files changed, 48 insertions(+), 37 deletions(-)
 create mode 100644 drivers/nvme/target/modules.builtin

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fb24746de06..c3a9df5545cf 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -984,7 +984,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 091a0ca16361..173583ce40d2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1593,7 +1593,7 @@ static void nvmet_port_release(struct config_item *item)
 	struct nvmet_port *port = to_nvmet_port(item);
 
 	/* Let inflight controllers teardown complete */
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	list_del(&port->global_entry);
 
 	kfree(port->ana_state);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 724a6d373340..18444eaf35ab 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -17,6 +17,7 @@
 
 struct workqueue_struct *buffered_io_wq;
 struct workqueue_struct *zbd_wq;
+struct workqueue_struct *nvmet_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
@@ -205,7 +206,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 	list_add_tail(&aen->entry, &ctrl->async_events);
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +386,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	if (reset_tbkas) {
 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
 			ctrl->cntlid);
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 		return;
 	}
 
@@ -403,7 +404,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1479,7 +1480,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		schedule_work(&ctrl->fatal_err_work);
+		queue_work(nvmet_wq, &ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
 }
@@ -1621,9 +1622,15 @@ static int __init nvmet_init(void)
 		goto out_free_zbd_work_queue;
 	}
 
+	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	if (!nvmet_wq) {
+		error = -ENOMEM;
+		goto out_free_buffered_work_queue;
+	}
+
 	error = nvmet_init_discovery();
 	if (error)
-		goto out_free_work_queue;
+		goto out_free_nvmet_work_queue;
 
 	error = nvmet_init_configfs();
 	if (error)
@@ -1632,7 +1639,9 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
 	nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+	destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
 	destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
 	destroy_workqueue(zbd_wq);
@@ -1644,6 +1653,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
+	destroy_workqueue(nvmet_wq);
 	destroy_workqueue(buffered_io_wq);
 	destroy_workqueue(zbd_wq);
 
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index de90001fc5c4..ab2627e17bb9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 		nvmet_fc_tgtport_put(tgtport);
 
 		if (found_ctrl) {
-			if (!schedule_work(&assoc->del_work))
+			if (!queue_work(nvmet_wq, &assoc->del_work))
 				/* already deleting - release local reference */
 				nvmet_fc_tgt_a_put(assoc);
 			return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	iod->rqstdatalen = lsreqbuf_len;
 	iod->hosthandle = hosthandle;
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 
 	return 0;
 }
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 54606f1872b4..5c16372f3b53 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 		return ret;
 	}
 
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 	}
 
 	return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 		return ret;
 	}
 
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 	}
 
 	return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
 	tgt_rscn->tport = tgtport->private;
 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-	schedule_work(&tgt_rscn->work);
+	queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
 	kref_init(&tfcp_req->ref);
 
-	schedule_work(&tfcp_req->fcp_rcv_work);
+	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
 	return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->tio_done_work);
+	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
-		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
 	else  {
 		/*
 		 * as the io has already had the done callback made,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6485dc8eb974..f3d58abf11e0 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 23f9d6f88804..59024af2da2e 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->req.transfer_len = blk_rq_payload_bytes(req);
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 	return BLK_STS_OK;
 }
 
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 		return;
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 }
 
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
diff --git a/drivers/nvme/target/modules.builtin b/drivers/nvme/target/modules.builtin
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69637bf8f8e1..57d130d8a5bd 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -366,6 +366,7 @@ struct nvmet_req {
 
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index a810bf569fff..e350b6d5b0ed 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -282,7 +282,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 	if (req->p.use_workqueue || effects) {
 		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
 		req->p.rq = rq;
-		schedule_work(&req->p.work);
+		queue_work(nvmet_wq, &req->p.work);
 	} else {
 		rq->end_io_data = req;
 		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 2446d0918a41..2fab0b219b25 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 }
 
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	schedule_work(&queue->release_work);
+	queue_work(nvmet_wq, &queue->release_work);
 }
 
 /**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		if (!queue) {
 			struct nvmet_rdma_port *port = cm_id->context;
 
-			schedule_delayed_work(&port->repair_work, 0);
+			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
 			break;
 		}
 		fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
 	nvmet_rdma_disable_port(port);
 	ret = nvmet_rdma_enable_port(port);
 	if (ret)
-		schedule_delayed_work(&port->repair_work, 5 * HZ);
+		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
 	}
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 83ca577f72be..2793554e622e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 	spin_lock(&queue->state_lock);
 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
 		queue->state = NVMET_TCP_Q_DISCONNECTING;
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 	spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
 		goto out;
 
 	if (sk->sk_state == TCP_LISTEN)
-		schedule_work(&port->accept_work);
+		queue_work(nvmet_wq, &port->accept_work);
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 
 	if (sq->qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
 
 	nvmet_unregister_transport(&nvmet_tcp_ops);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	mutex_unlock(&nvmet_tcp_queue_mutex);
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 
 	destroy_workqueue(nvmet_tcp_wq);
 }
-- 
2.30.2




* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Chaitanya Kulkarni @ 2022-03-21 20:41 UTC
  To: Sagi Grimberg, linux-nvme, Christoph Hellwig, Keith Busch,
	Chaitanya Kulkarni
  Cc: James Smart, Max Gurtovoy

On 3/21/22 04:57, Sagi Grimberg wrote:
> Any attempt to flush the kernel-global workqueues can deadlock, so
> we should simply stop using them. Instead, introduce nvmet_wq, a
> generic nvmet workqueue for work elements that don't explicitly
> require a dedicated workqueue (as evidenced by the mere fact that
> they currently use the system_wq).
> 
> Changes were done using the following replacements:
> s/schedule_work(/queue_work(nvmet_wq, /g
> s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
> s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g
> 
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Christoph Hellwig @ 2022-03-22 11:15 UTC
  To: Sagi Grimberg
  Cc: linux-nvme, Christoph Hellwig, Keith Busch, Chaitanya Kulkarni,
	James Smart, Max Gurtovoy

On Mon, Mar 21, 2022 at 01:57:27PM +0200, Sagi Grimberg wrote:
> Any attempt to flush the kernel-global workqueues can deadlock, so
> we should simply stop using them. Instead, introduce nvmet_wq, a
> generic nvmet workqueue for work elements that don't explicitly
> require a dedicated workqueue (as evidenced by the mere fact that
> they currently use the system_wq).
> 
> Changes were done using the following replacements:
> s/schedule_work(/queue_work(nvmet_wq, /g
> s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
> s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g

Should this have a Reported-by or Suggested-by for Tetsuo?



* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Max Gurtovoy @ 2022-03-22 12:07 UTC
  To: Sagi Grimberg, linux-nvme, Christoph Hellwig, Keith Busch,
	Chaitanya Kulkarni
  Cc: James Smart, Max Gurtovoy

Sagi,

On 3/21/2022 1:57 PM, Sagi Grimberg wrote:
> Any attempt to flush the kernel-global workqueues can deadlock, so
> we should simply stop using them. Instead, introduce nvmet_wq, a
> generic nvmet workqueue for work elements that don't explicitly
> require a dedicated workqueue (as evidenced by the mere fact that
> they currently use the system_wq).
>
> Changes were done using the following replacements:
> s/schedule_work(/queue_work(nvmet_wq, /g
> s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
> s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g

Should we have the same change for the iSER target?

It uses schedule_delayed_work and flush_scheduled_work as well.
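
Something like this sketch, mirroring the nvmet change (identifiers
hypothetical, the actual ib_isert code may differ):

#include <linux/module.h>
#include <linux/workqueue.h>

/* hypothetical private workqueue for the iser target */
static struct workqueue_struct *isert_wq;

static int __init isert_init(void)
{
	isert_wq = alloc_workqueue("isert_wq", WQ_MEM_RECLAIM, 0);
	if (!isert_wq)
		return -ENOMEM;
	return 0;
}

static void __exit isert_exit(void)
{
	/* was flush_scheduled_work() */
	flush_workqueue(isert_wq);
	destroy_workqueue(isert_wq);
}

module_init(isert_init);
module_exit(isert_exit);

with schedule_delayed_work(&work, delay) calls becoming
queue_delayed_work(isert_wq, &work, delay).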


> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
>   drivers/nvme/target/admin-cmd.c     |  2 +-
>   drivers/nvme/target/configfs.c      |  2 +-
>   drivers/nvme/target/core.c          | 22 ++++++++++++++++------
>   drivers/nvme/target/fc.c            |  8 ++++----
>   drivers/nvme/target/fcloop.c        | 16 ++++++++--------
>   drivers/nvme/target/io-cmd-file.c   |  6 +++---
>   drivers/nvme/target/loop.c          |  4 ++--
>   drivers/nvme/target/modules.builtin |  0
>   drivers/nvme/target/nvmet.h         |  1 +
>   drivers/nvme/target/passthru.c      |  2 +-
>   drivers/nvme/target/rdma.c          | 12 ++++++------
>   drivers/nvme/target/tcp.c           | 10 +++++-----
>   12 files changed, 48 insertions(+), 37 deletions(-)
>   create mode 100644 drivers/nvme/target/modules.builtin

Looks good,

Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>





* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Christoph Hellwig @ 2022-03-23  8:20 UTC
  To: Sagi Grimberg
  Cc: linux-nvme, Christoph Hellwig, Keith Busch, Chaitanya Kulkarni,
	James Smart, Max Gurtovoy

Thanks,

applied to nvme-5.18.



* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Sagi Grimberg @ 2022-03-23 10:49 UTC
  To: Max Gurtovoy, linux-nvme, Christoph Hellwig, Keith Busch,
	Chaitanya Kulkarni
  Cc: James Smart, Max Gurtovoy


> Should we have the same change for the iSER target?
> 
> It uses schedule_delayed_work and flush_scheduled_work as well.

We should. Can you send a patch? Or should I?



* Re: [PATCH] nvmet: use a private workqueue instead of the system workqueue
From: Max Gurtovoy @ 2022-03-23 15:30 UTC
  To: Sagi Grimberg, linux-nvme, Christoph Hellwig, Keith Busch,
	Chaitanya Kulkarni
  Cc: James Smart, Max Gurtovoy


On 3/23/2022 12:49 PM, Sagi Grimberg wrote:
>
>> Should we have the same change for the iSER target?
>>
>> It uses schedule_delayed_work and flush_scheduled_work as well.
>
> We should. Can you send a patch? Or should I?

I'll send a patch.




