* cleanup blk_execute_rq* v2
@ 2022-05-24 12:15 Christoph Hellwig
  2022-05-24 12:15 ` [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait Christoph Hellwig
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Christoph Hellwig @ 2022-05-24 12:15 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel

Hi Jens,

this series cleans up the blk_execute_rq* helpers.  It simplifies the
plugging mess a bit, fixes the sparse __bitwise warnings, and streamlines
the blk_execute_rq_nowait API.
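
For readers skimming the archive, here is a rough sketch of what an
asynchronous passthrough submission looks like once the whole series is
applied.  This is an illustration distilled from the patches below, not
code from the tree, and the my_* identifiers are made up:

#include <linux/blk-mq.h>
#include <linux/completion.h>

/* Illustrative completion handler; the signature matches rq_end_io_fn
 * as of this series (void return, blk_status_t argument).  In a real
 * driver the status would be propagated to the submitter. */
static void my_end_io(struct request *rq, blk_status_t status)
{
	struct completion *done = rq->end_io_data;

	blk_mq_free_request(rq);
	complete(done);
}

static void my_submit_async(struct request *rq, struct completion *done)
{
	/* After patch 3 the handler is assigned directly instead of
	 * being passed as an argument to blk_execute_rq_nowait(). */
	rq->end_io = my_end_io;
	rq->end_io_data = done;
	blk_execute_rq_nowait(rq, false /* at_head */);
}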

Changes since v1:
 - rebased to the current Linus tree with the nvme driver changes
   merged
 - fixed trailing whitespace and odd formatting
 - fixed a missing semicolon in ufs


* [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait
  2022-05-24 12:15 cleanup blk_execute_rq* v2 Christoph Hellwig
@ 2022-05-24 12:15 ` Christoph Hellwig
  2022-05-24 20:58   ` Chaitanya Kulkarni
  2022-05-28 12:20   ` Jens Axboe
  2022-05-24 12:15 ` [PATCH 2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq Christoph Hellwig
  2022-05-24 12:15 ` [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait Christoph Hellwig
  2 siblings, 2 replies; 11+ messages in thread
From: Christoph Hellwig @ 2022-05-24 12:15 UTC (permalink / raw)
  To: Jens Axboe
  Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel, Keith Busch

We don't want to plug for synchronous execution where we immediately
wait for the request.  Once that is done, not a whole lot of code is
shared, so just remove __blk_execute_rq_nowait.
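
For context, plugging batches requests submitted between blk_start_plug()
and blk_finish_plug() so they can be issued to the driver in one go, which
only helps when the submitter keeps doing work before it waits.  A generic
sketch of the pattern (not code from this patch; my_submit_batch is a
made-up name):

#include <linux/blkdev.h>

static void my_submit_batch(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* requests queued here are held on the plug list ... */
	blk_finish_plug(&plug);	/* ... and issued to the driver here */
}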

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq.c | 69 ++++++++++++++++++++++----------------------------
 1 file changed, 30 insertions(+), 39 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b7556482..31a89d1004b8f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1203,28 +1203,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:		request to insert
@@ -1240,8 +1218,16 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
 
+	rq->end_io = done;
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1277,27 +1263,32 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
+	rq->end_io = blk_end_sync_rq;
 
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
 
-	if (blk_rq_is_poll(rq))
+	if (blk_rq_is_poll(rq)) {
 		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait);
+	}
 
 	return (blk_status_t)(uintptr_t)rq->end_io_data;
 }
-- 
2.30.2



* [PATCH 2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq
  2022-05-24 12:15 cleanup blk_execute_rq* v2 Christoph Hellwig
  2022-05-24 12:15 ` [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait Christoph Hellwig
@ 2022-05-24 12:15 ` Christoph Hellwig
  2022-05-24 21:00   ` Chaitanya Kulkarni
  2022-05-24 12:15 ` [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait Christoph Hellwig
  2 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2022-05-24 12:15 UTC (permalink / raw)
  To: Jens Axboe
  Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel, Keith Busch

Instead of trying to cast a __bitwise integer to a larger integer
and then a pointer, just allocate a struct with the blk_status_t and the
completion on the stack and set the end_io_data to that.  Use the
opportunity to move the code to where it belongs and drop the rather
confusing comments.
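
As a standalone illustration of the sparse issue behind this (not code
from the tree; the type and function names are made up): converting a
__bitwise type to and from plain integers without __force is what
triggers the warnings, and that is exactly what the old end_io_data
round-trip did:

#include <linux/types.h>

typedef unsigned int __bitwise my_status_t;	/* stand-in for blk_status_t */

static void *my_encode(my_status_t status)
{
	return (void *)(uintptr_t)status;	/* sparse: cast from restricted type */
}

static my_status_t my_decode(void *cookie)
{
	return (my_status_t)(uintptr_t)cookie;	/* sparse: cast to restricted type */
}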

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq.c | 43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 31a89d1004b8f..28b3e6db98499 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1151,24 +1151,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1231,6 +1213,19 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
@@ -1262,7 +1257,9 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
 
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
@@ -1274,7 +1271,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	blk_mq_sched_insert_request(rq, at_head, true, false);
 
 	if (blk_rq_is_poll(rq)) {
-		blk_rq_poll_completion(rq, &wait);
+		blk_rq_poll_completion(rq, &wait.done);
 	} else {
 		/*
 		 * Prevent hang_check timer from firing at us during very long
@@ -1283,14 +1280,14 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 		unsigned long hang_check = sysctl_hung_task_timeout_secs;
 
 		if (hang_check)
-			while (!wait_for_completion_io_timeout(&wait,
+			while (!wait_for_completion_io_timeout(&wait.done,
 					hang_check * (HZ/2)))
 				;
 		else
-			wait_for_completion_io(&wait);
+			wait_for_completion_io(&wait.done);
 	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
 
-- 
2.30.2



* [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
  2022-05-24 12:15 cleanup blk_execute_rq* v2 Christoph Hellwig
  2022-05-24 12:15 ` [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait Christoph Hellwig
  2022-05-24 12:15 ` [PATCH 2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq Christoph Hellwig
@ 2022-05-24 12:15 ` Christoph Hellwig
  2022-05-24 21:03   ` Chaitanya Kulkarni
  2 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2022-05-24 12:15 UTC (permalink / raw)
  To: Jens Axboe
  Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel,
	Keith Busch, Kanchan Joshi

Let the caller set it together with the end_io_data instead of passing
a pointless argument.  Note that the target code did in fact already
set it and then just overrode it again by calling blk_execute_rq_nowait.
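
Schematically, the conversion at each call site looks like this
(illustrative identifiers, not any particular driver):

	/* before */
	rq->end_io_data = my_cookie;
	blk_execute_rq_nowait(rq, false, my_end_io);

	/* after */
	rq->end_io = my_end_io;
	rq->end_io_data = my_cookie;
	blk_execute_rq_nowait(rq, false);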

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
---
 block/blk-mq.c                     |  5 +----
 drivers/block/sx8.c                |  4 ++--
 drivers/nvme/host/core.c           |  3 ++-
 drivers/nvme/host/ioctl.c          |  3 ++-
 drivers/nvme/host/pci.c            | 10 +++++++---
 drivers/nvme/target/passthru.c     |  3 ++-
 drivers/scsi/scsi_error.c          |  5 +++--
 drivers/scsi/sg.c                  |  3 ++-
 drivers/scsi/st.c                  |  3 ++-
 drivers/scsi/ufs/ufshpb.c          |  6 ++++--
 drivers/target/target_core_pscsi.c |  3 +--
 include/linux/blk-mq.h             |  3 +--
 12 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 28b3e6db98499..8e7860268f614 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1189,7 +1189,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:		request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:	I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1198,13 +1197,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
-	rq->end_io = done;
-
 	blk_account_io_start(rq);
 	if (current->plug)
 		blk_add_rq_to_plug(current->plug, rq);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b94..63b4f6431d2e6 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c7078..727c12cbe3272 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
+	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
 	rq->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 096b1b47d750e..a2e89db1cd639 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
 	pdu->meta_len = d.metadata_len;
 
-	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
 }
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a98a7de09642..0403b6d10bb48 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 	nvme_init_request(abort_req, &cmd);
 
+	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
 	abort_req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(abort_req, false, abort_endio);
+	blk_execute_rq_nowait(abort_req, false);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 		return PTR_ERR(req);
 	nvme_init_request(req, &cmd);
 
+	if (opcode == nvme_admin_delete_cq)
+		req->end_io = nvme_del_cq_end;
+	else
+		req->end_io = nvme_del_queue_end;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
 	req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false);
 	return 0;
 }
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eba..3cc4d6709c93c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);
 	} else {
+		rq->end_io = nvmet_passthru_req_done;
 		rq->end_io_data = req;
-		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+		blk_execute_rq_nowait(rq, false);
 	}
 
 	if (ns)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1c..49ef864df5816 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
 	scmd->cmnd[5] = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+	scmd->allowed = 5;
 
 	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
-	scmd->allowed = 5;
+	req->end_io = eh_lock_door_done;
 
-	blk_execute_rq_nowait(req, true, eh_lock_door_done);
+	blk_execute_rq_nowait(req, true);
 }
 
 /**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3e..118c7b4a8af2c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
 	srp->rq->timeout = timeout;
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-	blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+	srp->rq->end_io = sg_rq_end_io;
+	blk_execute_rq_nowait(srp->rq, at_head);
 	return 0;
 }
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b922..850172a2b8f14 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 	req->timeout = timeout;
 	scmd->allowed = retries;
+	req->end_io = st_scsi_execute_end;
 	req->end_io_data = SRpnt;
 
-	blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+	blk_execute_rq_nowait(req, true);
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 588c0329b80ca..8a7809b9728df 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
 	req->timeout = 0;
 	req->end_io_data = umap_req;
+	req->end_io = ufshpb_umap_req_compl_fn;
 
 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 	blk_rq_append_bio(req, map_req->bio);
 
 	req->end_io_data = map_req;
+	req->end_io = ufshpb_map_req_compl_fn;
 
 	if (unlikely(last))
 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 				map_req->rb.srgn_idx, mem_size);
 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.map_req_cnt++;
 	return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316d..e6a967ddc08ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
 	cmd->priv = scmd->cmnd;
 
-	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-			pscsi_req_done);
+	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
 	return 0;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9f07061418db0..e2d9daf7e8dd0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 		unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
-- 
2.30.2



* Re: [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait
  2022-05-24 12:15 ` [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait Christoph Hellwig
@ 2022-05-24 20:58   ` Chaitanya Kulkarni
  2022-05-28 12:20   ` Jens Axboe
  1 sibling, 0 replies; 11+ messages in thread
From: Chaitanya Kulkarni @ 2022-05-24 20:58 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel,
	Keith Busch, Jens Axboe

On 5/24/22 05:15, Christoph Hellwig wrote:
> We don't want to plug for synchronous execution where we immediately
> wait for the request.  Once that is done, not a whole lot of code is
> shared, so just remove __blk_execute_rq_nowait.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Keith Busch <kbusch@kernel.org>
> ---
>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq
  2022-05-24 12:15 ` [PATCH 2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq Christoph Hellwig
@ 2022-05-24 21:00   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 11+ messages in thread
From: Chaitanya Kulkarni @ 2022-05-24 21:00 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Ming Lei, Jens Axboe, linux-block, linux-nvme, linux-scsi,
	target-devel, Keith Busch

On 5/24/22 05:15, Christoph Hellwig wrote:
> Instead of trying to cast a __bitwise integer to a larger integer
> and then a pointer, just allocate a struct with the blk_status_t and the
> completion on the stack and set the end_io_data to that.  Use the
> opportunity to move the code to where it belongs and drop the rather
> confusing comments.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Keith Busch <kbusch@kernel.org>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
  2022-05-24 12:15 ` [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait Christoph Hellwig
@ 2022-05-24 21:03   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 11+ messages in thread
From: Chaitanya Kulkarni @ 2022-05-24 21:03 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe
  Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel,
	Keith Busch, Kanchan Joshi

On 5/24/22 05:15, Christoph Hellwig wrote:
> Let the caller set it together with the end_io_data instead of passing
> a pointless argument.  Note that the target code did in fact already
> set it and then just overrode it again by calling blk_execute_rq_nowait.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Keith Busch <kbusch@kernel.org>
> Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait
  2022-05-24 12:15 ` [PATCH 1/3] blk-mq: remove __blk_execute_rq_nowait Christoph Hellwig
  2022-05-24 20:58   ` Chaitanya Kulkarni
@ 2022-05-28 12:20   ` Jens Axboe
  1 sibling, 0 replies; 11+ messages in thread
From: Jens Axboe @ 2022-05-28 12:20 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: linux-block, linux-nvme, kbusch, linux-scsi, ming.lei, target-devel

On Tue, 24 May 2022 14:15:28 +0200, Christoph Hellwig wrote:
> We don't want to plug for synchronous execution where we immediately
> wait for the request.  Once that is done, not a whole lot of code is
> shared, so just remove __blk_execute_rq_nowait.
> 
> 

Applied, thanks!

[1/3] blk-mq: remove __blk_execute_rq_nowait
      (no commit info)
[2/3] blk-mq: avoid a mess of casts for blk_end_sync_rq
      (no commit info)
[3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
      (no commit info)

Best regards,
-- 
Jens Axboe




* Re: [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
  2022-05-17  6:49 ` [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait Christoph Hellwig
  2022-05-18 23:23   ` kernel test robot
@ 2022-05-19  7:36   ` Kanchan Joshi
  1 sibling, 0 replies; 11+ messages in thread
From: Kanchan Joshi @ 2022-05-19  7:36 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Jens Axboe, Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel

On Tue, May 17, 2022 at 12:27 PM Christoph Hellwig <hch@lst.de> wrote:
>
> Let the caller set it together with the end_io_data instead of passing
> a pointless argument.  Note that the target code did in fact already
> set it and then just overrode it again by calling blk_execute_rq_nowait.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-mq.c                     |  5 +----
>  drivers/block/sx8.c                |  4 ++--
>  drivers/nvme/host/core.c           |  3 ++-
>  drivers/nvme/host/ioctl.c          |  3 ++-
>  drivers/nvme/host/pci.c            | 10 +++++++---
>  drivers/nvme/target/passthru.c     |  3 ++-
>  drivers/scsi/scsi_error.c          |  5 +++--
>  drivers/scsi/sg.c                  |  3 ++-
>  drivers/scsi/st.c                  |  3 ++-
>  drivers/scsi/ufs/ufshpb.c          |  6 ++++--
>  drivers/target/target_core_pscsi.c |  3 +--
>  include/linux/blk-mq.h             |  3 +--
>  12 files changed, 29 insertions(+), 22 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 0169b624edda1..c832011bc90dd 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1189,7 +1189,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
>   * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
>   * @rq:                request to insert
>   * @at_head:    insert request at head or tail of queue
> - * @done:      I/O completion handler
>   *
>   * Description:
>   *    Insert a fully prepared request at the back of the I/O scheduler queue
> @@ -1198,13 +1197,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
>   * Note:
>   *    This function will invoke @done directly if the queue is dead.
>   */
> -void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
> +void blk_execute_rq_nowait(struct request *rq, bool at_head)
>  {
>         WARN_ON(irqs_disabled());
>         WARN_ON(!blk_rq_is_passthrough(rq));
>
> -       rq->end_io = done;
> -
>         blk_account_io_start(rq);
>         if (current->plug)
>                 blk_add_rq_to_plug(current->plug, rq);
> diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
> index b361583944b94..63b4f6431d2e6 100644
> --- a/drivers/block/sx8.c
> +++ b/drivers/block/sx8.c
> @@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
>         spin_unlock_irq(&host->lock);
>
>         DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
> -       blk_execute_rq_nowait(rq, true, NULL);
> +       blk_execute_rq_nowait(rq, true);
>
>         return 0;
>
> @@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
>         crq->msg_bucket = (u32) rc;
>
>         DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
> -       blk_execute_rq_nowait(rq, true, NULL);
> +       blk_execute_rq_nowait(rq, true);
>
>         return 0;
>  }
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 510e3860358bb..22aa5780623da 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -1206,8 +1206,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
>         nvme_init_request(rq, &ctrl->ka_cmd);
>
>         rq->timeout = ctrl->kato * HZ;
> +       rq->end_io = nvme_keep_alive_end_io;
>         rq->end_io_data = ctrl;
> -       blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
> +       blk_execute_rq_nowait(rq, false);
>  }
>
>  static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
> diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
> index 7b0e2c9cdcae3..a92cc686ffbc0 100644
> --- a/drivers/nvme/host/ioctl.c
> +++ b/drivers/nvme/host/ioctl.c
> @@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
>                         blk_flags);
>         if (IS_ERR(req))
>                 return PTR_ERR(req);
> +       req->end_io = nvme_uring_cmd_end_io;
>         req->end_io_data = ioucmd;
>
>         /* to free bio on completion, as req->bio will be null at that time */
> @@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
>         pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
>         pdu->meta_len = d.metadata_len;
>
> -       blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
> +       blk_execute_rq_nowait(req, false);
>         return -EIOCBQUEUED;
>  }
>
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 3aacf1c0d5a5f..068dbb00c5ea9 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -1438,8 +1438,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
>         }
>         nvme_init_request(abort_req, &cmd);
>
> +       abort_req->end_io = abort_endio;
>         abort_req->end_io_data = NULL;
> -       blk_execute_rq_nowait(abort_req, false, abort_endio);
> +       blk_execute_rq_nowait(abort_req, false);
>
>         /*
>          * The aborted req will be completed on receiving the abort req.
> @@ -2483,11 +2484,14 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
>                 return PTR_ERR(req);
>         nvme_init_request(req, &cmd);
>
> +       if (opcode == nvme_admin_delete_cq)
> +               req->end_io = nvme_del_cq_end;
> +       else
> +               req->end_io = nvme_del_queue_end;
>         req->end_io_data = nvmeq;
>
>         init_completion(&nvmeq->delete_done);
> -       blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
> -                       nvme_del_cq_end : nvme_del_queue_end);
> +       blk_execute_rq_nowait(req, false);
>         return 0;
>  }
>
> diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
> index 5247c24538eba..3cc4d6709c93c 100644
> --- a/drivers/nvme/target/passthru.c
> +++ b/drivers/nvme/target/passthru.c
> @@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
>                 req->p.rq = rq;
>                 queue_work(nvmet_wq, &req->p.work);
>         } else {
> +               rq->end_io = nvmet_passthru_req_done;
>                 rq->end_io_data = req;
> -               blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
> +               blk_execute_rq_nowait(rq, false);
>         }
>
>         if (ns)
> diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
> index cdaca13ac1f1c..49ef864df5816 100644
> --- a/drivers/scsi/scsi_error.c
> +++ b/drivers/scsi/scsi_error.c
> @@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
>         scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
>         scmd->cmnd[5] = 0;
>         scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
> +       scmd->allowed = 5;
>
>         req->rq_flags |= RQF_QUIET;
>         req->timeout = 10 * HZ;
> -       scmd->allowed = 5;
> +       req->end_io = eh_lock_door_done;
>
> -       blk_execute_rq_nowait(req, true, eh_lock_door_done);
> +       blk_execute_rq_nowait(req, true);
>  }
>
>  /**
> diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
> index cbffa712b9f3e..118c7b4a8af2c 100644
> --- a/drivers/scsi/sg.c
> +++ b/drivers/scsi/sg.c
> @@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
>
>         srp->rq->timeout = timeout;
>         kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
> -       blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
> +       srp->rq->end_io = sg_rq_end_io;
> +       blk_execute_rq_nowait(srp->rq, at_head);
>         return 0;
>  }
>
> diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
> index 56a093a90b922..850172a2b8f14 100644
> --- a/drivers/scsi/st.c
> +++ b/drivers/scsi/st.c
> @@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
>         memcpy(scmd->cmnd, cmd, scmd->cmd_len);
>         req->timeout = timeout;
>         scmd->allowed = retries;
> +       req->end_io = st_scsi_execute_end;
>         req->end_io_data = SRpnt;
>
> -       blk_execute_rq_nowait(req, true, st_scsi_execute_end);
> +       blk_execute_rq_nowait(req, true);
>         return 0;
>  }
>
> diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
> index 81099b68bbfbd..796a9773bf3de 100644
> --- a/drivers/scsi/ufs/ufshpb.c
> +++ b/drivers/scsi/ufs/ufshpb.c
> @@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
>
>         req->timeout = 0;
>         req->end_io_data = umap_req;
> +       req->end_io = ufshpb_umap_req_compl_fn;
>
>         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
>         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
>
> -       blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
> +       blk_execute_rq_nowait(req, true);
>
>         hpb->stats.umap_req_cnt++;
>  }
> @@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
>         blk_rq_append_bio(req, map_req->bio);
>
>         req->end_io_data = map_req;
> +       req->end_io = ufshpb_map_req_compl_fn;
>
>         if (unlikely(last))
>                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
> @@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
>                                 map_req->rb.srgn_idx, mem_size);
>         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
>
> -       blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
> +       blk_execute_rq_nowait(req, true)

Missing semicolon here. Otherwise, looks good.

Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>


* Re: [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
  2022-05-17  6:49 ` [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait Christoph Hellwig
@ 2022-05-18 23:23   ` kernel test robot
  2022-05-19  7:36   ` Kanchan Joshi
  1 sibling, 0 replies; 11+ messages in thread
From: kernel test robot @ 2022-05-18 23:23 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe
  Cc: kbuild-all, Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel

Hi Christoph,

I love your patch! Yet something to improve:

[auto build test ERROR on axboe-block/for-next]
[also build test ERROR on next-20220518]
[cannot apply to mkp-scsi/for-next jejb-scsi/for-next linus/master v5.18-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/intel-lab-lkp/linux/commits/Christoph-Hellwig/blk-mq-remove-__blk_execute_rq_nowait/20220517-154900
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: arc-allyesconfig (https://download.01.org/0day-ci/archive/20220519/202205190712.zyCIh9kG-lkp@intel.com/config)
compiler: arceb-elf-gcc (GCC) 11.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/2dc03b4b4f1f1aa542a1ab6d6ff64be3d9db050c
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Christoph-Hellwig/blk-mq-remove-__blk_execute_rq_nowait/20220517-154900
        git checkout 2dc03b4b4f1f1aa542a1ab6d6ff64be3d9db050c
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.3.0 make.cross W=1 O=build_dir ARCH=arc SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   drivers/scsi/ufs/ufshpb.c: In function 'ufshpb_execute_map_req':
>> drivers/scsi/ufs/ufshpb.c:721:41: error: expected ';' before 'hpb'
     721 |         blk_execute_rq_nowait(req, true)
         |                                         ^
         |                                         ;
     722 | 
     723 |         hpb->stats.map_req_cnt++;
         |         ~~~                              


vim +721 drivers/scsi/ufs/ufshpb.c

   683	
   684	static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
   685					  struct ufshpb_req *map_req, bool last)
   686	{
   687		struct request_queue *q;
   688		struct request *req;
   689		struct scsi_cmnd *scmd;
   690		int mem_size = hpb->srgn_mem_size;
   691		int ret = 0;
   692		int i;
   693	
   694		q = hpb->sdev_ufs_lu->request_queue;
   695		for (i = 0; i < hpb->pages_per_srgn; i++) {
   696			ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
   697					      PAGE_SIZE, 0);
   698			if (ret != PAGE_SIZE) {
   699				dev_err(&hpb->sdev_ufs_lu->sdev_dev,
   700					   "bio_add_pc_page fail %d - %d\n",
   701					   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
   702				return ret;
   703			}
   704		}
   705	
   706		req = map_req->req;
   707	
   708		blk_rq_append_bio(req, map_req->bio);
   709	
   710		req->end_io_data = map_req;
   711		req->end_io = ufshpb_map_req_compl_fn;
   712	
   713		if (unlikely(last))
   714			mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
   715	
   716		scmd = blk_mq_rq_to_pdu(req);
   717		ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
   718					map_req->rb.srgn_idx, mem_size);
   719		scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
   720	
 > 721		blk_execute_rq_nowait(req, true)
   722	
   723		hpb->stats.map_req_cnt++;
   724		return 0;
   725	}
   726	

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp


* [PATCH 3/3] blk-mq: remove the done argument to blk_execute_rq_nowait
  2022-05-17  6:48 cleanup blk_execute_rq* Christoph Hellwig
@ 2022-05-17  6:49 ` Christoph Hellwig
  2022-05-18 23:23   ` kernel test robot
  2022-05-19  7:36   ` Kanchan Joshi
  0 siblings, 2 replies; 11+ messages in thread
From: Christoph Hellwig @ 2022-05-17  6:49 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Ming Lei, linux-block, linux-nvme, linux-scsi, target-devel

Let the caller set it together with the end_io_data instead of passing
a pointless argument.  Note that the target code did in fact already
set it and then just overrode it again by calling blk_execute_rq_nowait.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c                     |  5 +----
 drivers/block/sx8.c                |  4 ++--
 drivers/nvme/host/core.c           |  3 ++-
 drivers/nvme/host/ioctl.c          |  3 ++-
 drivers/nvme/host/pci.c            | 10 +++++++---
 drivers/nvme/target/passthru.c     |  3 ++-
 drivers/scsi/scsi_error.c          |  5 +++--
 drivers/scsi/sg.c                  |  3 ++-
 drivers/scsi/st.c                  |  3 ++-
 drivers/scsi/ufs/ufshpb.c          |  6 ++++--
 drivers/target/target_core_pscsi.c |  3 +--
 include/linux/blk-mq.h             |  3 +--
 12 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0169b624edda1..c832011bc90dd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1189,7 +1189,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:		request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:	I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1198,13 +1197,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
-	rq->end_io = done;
-
 	blk_account_io_start(rq);
 	if (current->plug)
 		blk_add_rq_to_plug(current->plug, rq);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b94..63b4f6431d2e6 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 510e3860358bb..22aa5780623da 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,8 +1206,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
+	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
-	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 7b0e2c9cdcae3..a92cc686ffbc0 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
 	pdu->meta_len = d.metadata_len;
 
-	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
 }
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3aacf1c0d5a5f..068dbb00c5ea9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,8 +1438,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 	nvme_init_request(abort_req, &cmd);
 
+	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
-	blk_execute_rq_nowait(abort_req, false, abort_endio);
+	blk_execute_rq_nowait(abort_req, false);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -2483,11 +2484,14 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 		return PTR_ERR(req);
 	nvme_init_request(req, &cmd);
 
+	if (opcode == nvme_admin_delete_cq)
+		req->end_io = nvme_del_cq_end;
+	else
+		req->end_io = nvme_del_queue_end;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
-	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false);
 	return 0;
 }
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eba..3cc4d6709c93c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);
 	} else {
+		rq->end_io = nvmet_passthru_req_done;
 		rq->end_io_data = req;
-		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+		blk_execute_rq_nowait(rq, false);
 	}
 
 	if (ns)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1c..49ef864df5816 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
 	scmd->cmnd[5] = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+	scmd->allowed = 5;
 
 	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
-	scmd->allowed = 5;
+	req->end_io = eh_lock_door_done;
 
-	blk_execute_rq_nowait(req, true, eh_lock_door_done);
+	blk_execute_rq_nowait(req, true);
 }
 
 /**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3e..118c7b4a8af2c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
 	srp->rq->timeout = timeout;
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-	blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+	srp->rq->end_io = sg_rq_end_io;
+	blk_execute_rq_nowait(srp->rq, at_head);
 	return 0;
 }
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b922..850172a2b8f14 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 	req->timeout = timeout;
 	scmd->allowed = retries;
+	req->end_io = st_scsi_execute_end;
 	req->end_io_data = SRpnt;
 
-	blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+	blk_execute_rq_nowait(req, true);
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 81099b68bbfbd..796a9773bf3de 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
 	req->timeout = 0;
 	req->end_io_data = umap_req;
+	req->end_io = ufshpb_umap_req_compl_fn;
 
 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 	blk_rq_append_bio(req, map_req->bio);
 
 	req->end_io_data = map_req;
+	req->end_io = ufshpb_map_req_compl_fn;
 
 	if (unlikely(last))
 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 				map_req->rb.srgn_idx, mem_size);
 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+	blk_execute_rq_nowait(req, true)
 
 	hpb->stats.map_req_cnt++;
 	return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316d..e6a967ddc08ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
 	cmd->priv = scmd->cmnd;
 
-	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-			pscsi_req_done);
+	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
 	return 0;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9f07061418db0..e2d9daf7e8dd0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 		unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
-- 
2.30.2



