All of lore.kernel.org
 help / color / mirror / Atom feed
From: Mike Christie <michael.christie@oracle.com>
To: bvanassche@acm.org, lduncan@suse.com, cleech@redhat.com,
	ming.lei@redhat.com, martin.petersen@oracle.com,
	linux-scsi@vger.kernel.org,
	james.bottomley@hansenpartnership.com
Cc: Mike Christie <michael.christie@oracle.com>
Subject: [RFC PATCH 3/4] scsi: iscsi: Support transmit from queuecommand
Date: Mon,  7 Mar 2022 18:39:56 -0600	[thread overview]
Message-ID: <20220308003957.123312-4-michael.christie@oracle.com> (raw)
In-Reply-To: <20220308003957.123312-1-michael.christie@oracle.com>

This patch allows drivers like iscsi_tcp to transmit the task from the
queuecommand. For the case where we were going to run from kblockd and the
app and iscsi share CPUs, we can skip the extra workqueue runs and it can
result in up to a 30% increase in 4K IOPS.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/scsi/libiscsi.c | 52 +++++++++++++++++++++++++++++++++++------
 include/scsi/libiscsi.h | 11 +++++++--
 2 files changed, 54 insertions(+), 9 deletions(-)

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 63e0d97df50f..5a2953260a94 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1700,9 +1700,13 @@ static void iscsi_xmitworker(struct work_struct *work)
 	/*
 	 * serialize Xmit worker on a per-connection basis.
 	 */
+	mutex_lock(&conn->xmit_mutex);
+
 	do {
 		rc = iscsi_data_xmit(conn);
 	} while (rc >= 0 || rc == -EAGAIN);
+
+	mutex_unlock(&conn->xmit_mutex);
 }
 
 static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
@@ -1832,9 +1836,16 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 		goto reject;
 	}
 
-	if (!ihost->workq) {
+	session->queued_cmdsn++;
+
+	if (!ihost->workq ||
+	    (ihost->xmit_from_qc && !conn->task &&
+	     list_empty(&conn->cmdqueue) && mutex_trylock(&conn->xmit_mutex))) {
 		reason = iscsi_prep_scsi_cmd_pdu(task);
 		if (reason) {
+			if (ihost->workq)
+				mutex_unlock(&conn->xmit_mutex);
+
 			if (reason == -ENOMEM ||  reason == -EACCES) {
 				reason = FAILURE_OOM;
 				goto prepd_reject;
@@ -1843,21 +1854,27 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 				goto prepd_fault;
 			}
 		}
-		if (session->tt->xmit_task(task, true)) {
-			session->cmdsn--;
-			reason = FAILURE_SESSION_NOT_READY;
-			goto prepd_reject;
+
+		if (!ihost->workq) {
+			if (session->tt->xmit_task(task, true)) {
+				session->cmdsn--;
+				reason = FAILURE_SESSION_NOT_READY;
+				goto prepd_reject;
+			}
+		} else {
+			iscsi_xmit_task(conn, task, false, true);
+			mutex_unlock(&conn->xmit_mutex);
 		}
 	} else {
 		list_add_tail(&task->running, &conn->cmdqueue);
 		iscsi_conn_queue_xmit(conn);
 	}
 
-	session->queued_cmdsn++;
 	spin_unlock_bh(&session->frwd_lock);
 	return 0;
 
 prepd_reject:
+	session->queued_cmdsn--;
 	spin_lock_bh(&session->back_lock);
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 	spin_unlock_bh(&session->back_lock);
@@ -1868,6 +1885,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
 prepd_fault:
+	session->queued_cmdsn--;
 	spin_lock_bh(&session->back_lock);
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 	spin_unlock_bh(&session->back_lock);
@@ -2558,11 +2576,17 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 
 	iscsi_suspend_tx(conn);
 
+	/*
+	 * If this is called from an SG ioctl the host may not be fully stopped.
+	 * Take the xmit_mutex in case we are xmitting from iscsi_queuecommand.
+	 */
+	mutex_lock(&conn->xmit_mutex);
 	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
 	session->tmf_state = TMF_INITIAL;
 	spin_unlock_bh(&session->frwd_lock);
+	mutex_unlock(&conn->xmit_mutex);
 
 	iscsi_start_tx(conn);
 	goto done;
@@ -2720,11 +2744,17 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
 
 	iscsi_suspend_tx(conn);
 
+	/*
+	 * If this is called from an SG ioctl the host may not be fully stopped.
+	 * Take the xmit_mutex in case we are xmitting from iscsi_queuecommand.
+	 */
+	mutex_lock(&conn->xmit_mutex);
 	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, -1, DID_ERROR);
 	session->tmf_state = TMF_INITIAL;
 	spin_unlock_bh(&session->frwd_lock);
+	mutex_unlock(&conn->xmit_mutex);
 
 	iscsi_start_tx(conn);
 	goto done;
@@ -2909,6 +2939,8 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
 			2, shost->host_no);
 		if (!ihost->workq)
 			goto free_host;
+	} else {
+		ihost->xmit_from_qc = true;
 	}
 
 	spin_lock_init(&ihost->lock);
@@ -3165,6 +3197,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+	mutex_init(&conn->xmit_mutex);
 
 	/* allocate login_task used for the login/text sequences */
 	spin_lock_bh(&session->frwd_lock);
@@ -3395,13 +3428,18 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	}
 
 	/*
-	 * flush queues.
+	 * When flushing the queues we don't wait for the block to complete
+	 * above because it can be expensive when there are lots of devices.
+	 * To make sure there is not an xmit from iscsi_queuecommand running,
+	 * take the xmit_mutex.
 	 */
+	mutex_lock(&conn->xmit_mutex);
 	spin_lock_bh(&session->frwd_lock);
 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
 	fail_mgmt_tasks(session, conn);
 	memset(&session->tmhdr, 0, sizeof(session->tmhdr));
 	spin_unlock_bh(&session->frwd_lock);
+	mutex_unlock(&conn->xmit_mutex);
 	mutex_unlock(&session->eh_mutex);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 412722f44747..1f2accf2bc1b 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -198,6 +198,12 @@ struct iscsi_conn {
 	struct list_head	cmdqueue;	/* data-path cmd queue */
 	struct list_head	requeue;	/* tasks needing another run */
 	struct work_struct	xmitwork;	/* per-conn. xmit workqueue */
+	/*
+	 * This must be held while calling the xmit_task callout if it can be
+	 * called from the iscsi_q workqueue. It must be taken after the
+	 * eh_mutex if both mutexes are needed.
+	 */
+	struct mutex		xmit_mutex;
 	/* recv */
 	struct work_struct	recvwork;
 	unsigned long		flags;		/* ISCSI_CONN_FLAGs */
@@ -267,8 +273,8 @@ struct iscsi_session {
 	struct iscsi_cls_session *cls_session;
 	/*
 	 * Syncs up the scsi eh thread with the iscsi eh thread when sending
-	 * task management functions. This must be taken before the session
-	 * and recv lock.
+	 * task management functions. This must be taken before the conn's
+	 * xmit_mutex.
 	 */
 	struct mutex		eh_mutex;
 	/* abort */
@@ -370,6 +376,7 @@ struct iscsi_host {
 	int			num_sessions;
 	int			state;
 
+	bool			xmit_from_qc;
 	struct workqueue_struct	*workq;
 };
 
-- 
2.25.1


  parent reply	other threads:[~2022-03-08  0:40 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-03-08  0:39 [RFC PATCH 0/4] scsi/iscsi: Send iscsi data from kblockd Mike Christie
2022-03-08  0:39 ` [RFC PATCH 1/4] scsi: Allow drivers to set BLK_MQ_F_BLOCKING Mike Christie
2022-03-08  4:57   ` Bart Van Assche
2022-03-09  0:53   ` Ming Lei
2022-03-09  1:17     ` Mike Christie
2022-03-09  1:43       ` Ming Lei
2022-03-09 19:38         ` Mike Christie
2022-03-08  0:39 ` [RFC PATCH 2/4] scsi: iscsi: Tell drivers when we must not block Mike Christie
2022-03-08  4:59   ` Bart Van Assche
2022-03-08 15:58     ` Mike Christie
2022-03-08  0:39 ` Mike Christie [this message]
2022-03-08  0:39 ` [RFC PATCH 4/4] scsi: iscsi_tcp: Allow user to control if transmit from queuecommand Mike Christie
2022-03-08  5:10   ` Bart Van Assche
2022-03-08 16:51     ` Mike Christie
     [not found] ` <CGME20220308004023epcas2p12ebd497c14d32f36d0aa6682c0b9d0db@epcms2p7>
2022-03-08  7:00   ` [RFC PATCH 1/4] scsi: Allow drivers to set BLK_MQ_F_BLOCKING Daejun Park
2022-03-08 16:14     ` Mike Christie
2022-03-15  8:26 ` [RFC PATCH 0/4] scsi/iscsi: Send iscsi data from kblockd Christoph Hellwig
2022-03-16  1:08   ` Ming Lei

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220308003957.123312-4-michael.christie@oracle.com \
    --to=michael.christie@oracle.com \
    --cc=bvanassche@acm.org \
    --cc=cleech@redhat.com \
    --cc=james.bottomley@hansenpartnership.com \
    --cc=lduncan@suse.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    --cc=ming.lei@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.