All of lore.kernel.org
 help / color / mirror / Atom feed
From: Mike Christie <michael.christie@oracle.com>
To: martin.petersen@oracle.com, linux-scsi@vger.kernel.org,
	target-devel@vger.kernel.org, mst@redhat.com,
	jasowang@redhat.com, stefanha@redhat.com,
	virtualization@lists.linux-foundation.org
Cc: Mike Christie <michael.christie@oracle.com>
Subject: [PATCH 10/11] target: replace work per cmd in completion path
Date: Thu,  4 Feb 2021 05:35:12 -0600	[thread overview]
Message-ID: <20210204113513.93204-11-michael.christie@oracle.com> (raw)
In-Reply-To: <20210204113513.93204-1-michael.christie@oracle.com>

Queueing a work struct per cmd can lead to lots of threads being created.
This patch just replaces the per-cmd completion work with a per-CPU list.
Combined with the first patches, this allows tcm loop with higher-perf
initiators like iser to go from around 700K IOPS to 1000K,
and reduces the number of threads that get created when the system
is under heavy load and hitting the initiator drivers' tagging limits.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/target/target_core_transport.c | 124 +++++++++++++++----------
 include/target/target_core_base.h      |   1 +
 2 files changed, 77 insertions(+), 48 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 35aa201ed80b..57022285badb 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -55,7 +55,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev, int err, bool write_pending);
-static void target_complete_ok_work(struct work_struct *work);
+static void target_queued_compl_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
@@ -295,10 +295,20 @@ static void target_queued_submit_work(struct work_struct *work)
 }
 
 static void target_queue_cmd_work(struct se_sess_cmd_queue *q,
-				  struct se_cmd *se_cmd, int cpu)
+				  struct se_cmd *se_cmd, int cpu,
+				  struct workqueue_struct *wq)
 {
 	llist_add(&se_cmd->se_cmd_list, &q->cmd_list);
-	queue_work_on(cpu, target_submission_wq, &q->work);
+	queue_work_on(cpu, wq, &q->work);
+}
+
+static void target_queue_cmd_compl(struct se_cmd *se_cmd)
+{
+	struct se_session *se_sess = se_cmd->se_sess;
+	int cpu = se_cmd->cpuid;
+
+	target_queue_cmd_work(&se_sess->cq[cpu], se_cmd, cpu,
+			      target_completion_wq);
 }
 
 /**
@@ -310,7 +320,8 @@ void target_queue_cmd_submit(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
 	int cpu = smp_processor_id();
 
-	target_queue_cmd_work(&se_sess->sq[cpu], se_cmd, cpu);
+	target_queue_cmd_work(&se_sess->sq[cpu], se_cmd, cpu,
+			      target_submission_wq);
 }
 EXPORT_SYMBOL_GPL(target_queue_cmd_submit);
 
@@ -318,11 +329,13 @@ static void target_flush_queued_cmds(struct se_session *se_sess)
 {
 	int i;
 
-	if (!se_sess->sq)
-		return;
+	if (se_sess->sq) {
+		for (i = 0; i < se_sess->q_cnt; i++)
+			cancel_work_sync(&se_sess->sq[i].work);
+	}
 
 	for (i = 0; i < se_sess->q_cnt; i++)
-		cancel_work_sync(&se_sess->sq[i].work);
+		cancel_work_sync(&se_sess->cq[i].work);
 }
 
 static void target_init_sess_cmd_queues(struct se_session *se_sess,
@@ -359,13 +372,21 @@ int transport_init_session(const struct target_core_fabric_ops *tfo,
 	atomic_set(&se_sess->stopped, 0);
 	se_sess->tfo = tfo;
 
+	se_sess->cq = kcalloc(nr_cpu_ids, sizeof(*se_sess->cq), GFP_KERNEL);
+	if (!se_sess->cq)
+		return -ENOMEM;
+	se_sess->q_cnt = nr_cpu_ids;
+	target_init_sess_cmd_queues(se_sess, se_sess->cq,
+				    target_queued_compl_work);
+
 	if (tfo->submit_queued_cmd) {
 		se_sess->sq = kcalloc(nr_cpu_ids, sizeof(*se_sess->sq),
 				      GFP_KERNEL);
-		if (!se_sess->sq)
-			return -ENOMEM;
+		if (!se_sess->sq) {
+			rc = -ENOMEM;
+			goto free_cq;
+		}
 
-		se_sess->q_cnt = nr_cpu_ids;
 		target_init_sess_cmd_queues(se_sess, se_sess->sq,
 					    target_queued_submit_work);
 	}
@@ -379,12 +400,15 @@ int transport_init_session(const struct target_core_fabric_ops *tfo,
 
 free_sq:
 	kfree(se_sess->sq);
+free_cq:
+	kfree(se_sess->cq);
 	return rc;
 }
 EXPORT_SYMBOL(transport_init_session);
 
 void transport_uninit_session(struct se_session *se_sess)
 {
+	kfree(se_sess->cq);
 	kfree(se_sess->sq);
 	/*
 	 * Drivers like iscsi and loop do not call target_stop_session
@@ -877,14 +901,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 		percpu_ref_put(&lun->lun_ref);
 }
 
-static void target_complete_failure_work(struct work_struct *work)
-{
-	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-
-	transport_generic_request_failure(cmd,
-			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
-}
-
 /*
  * Used when asking transport to copy Sense Data from the underlying
  * Linux/SCSI struct scsi_cmnd
@@ -972,13 +988,6 @@ static void target_handle_abort(struct se_cmd *cmd)
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void target_abort_work(struct work_struct *work)
-{
-	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-
-	target_handle_abort(cmd);
-}
-
 static bool target_cmd_interrupted(struct se_cmd *cmd)
 {
 	int post_ret;
@@ -986,8 +995,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 	if (cmd->transport_state & CMD_T_ABORTED) {
 		if (cmd->transport_complete_callback)
 			cmd->transport_complete_callback(cmd, false, &post_ret);
-		INIT_WORK(&cmd->work, target_abort_work);
-		queue_work(target_completion_wq, &cmd->work);
+
+		target_queue_cmd_compl(cmd);
 		return true;
 	} else if (cmd->transport_state & CMD_T_STOP) {
 		if (cmd->transport_complete_callback)
@@ -1002,7 +1011,6 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-	int success;
 	unsigned long flags;
 
 	if (target_cmd_interrupted(cmd))
@@ -1011,25 +1019,11 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 	cmd->scsi_status = scsi_status;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	switch (cmd->scsi_status) {
-	case SAM_STAT_CHECK_CONDITION:
-		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
-			success = 1;
-		else
-			success = 0;
-		break;
-	default:
-		success = 1;
-		break;
-	}
-
 	cmd->t_state = TRANSPORT_COMPLETE;
 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
-		  target_complete_failure_work);
-	queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+	target_queue_cmd_compl(cmd);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
@@ -2006,8 +2000,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	if (cmd->transport_state & CMD_T_ABORTED) {
-		INIT_WORK(&cmd->work, target_abort_work);
-		queue_work(target_completion_wq, &cmd->work);
+		target_queue_cmd_compl(cmd);
 		return;
 	}
 
@@ -2433,10 +2426,32 @@ static bool target_read_prot_action(struct se_cmd *cmd)
 	return false;
 }
 
-static void target_complete_ok_work(struct work_struct *work)
+static void target_complete_cmd_work(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-	int ret;
+	int ret, success;
+
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		target_handle_abort(cmd);
+		return;
+	}
+
+	switch (cmd->scsi_status) {
+	case SAM_STAT_CHECK_CONDITION:
+		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+			success = 1;
+		else
+			success = 0;
+		break;
+	default:
+		success = 1;
+		break;
+	}
+
+	if (!success) {
+		transport_generic_request_failure(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		return;
+	}
 
 	/*
 	 * Check if we need to move delayed/dormant tasks from cmds on the
@@ -2578,6 +2593,19 @@ static void target_complete_ok_work(struct work_struct *work)
 	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
+static void target_queued_compl_work(struct work_struct *work)
+{
+	struct se_sess_cmd_queue *cq =
+				container_of(work, struct se_sess_cmd_queue,
+					     work);
+	struct se_cmd *se_cmd, *next_cmd;
+	struct llist_node *cmd_list;
+
+	cmd_list = llist_del_all(&cq->cmd_list);
+	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list)
+		target_complete_cmd_work(se_cmd);
+}
+
 void target_free_sgl(struct scatterlist *sgl, int nents)
 {
 	sgl_free_n_order(sgl, nents, 0);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 10ac30f7f638..6b32e8d26347 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -643,6 +643,7 @@ struct se_session {
 	void			*sess_cmd_map;
 	struct sbitmap_queue	sess_tag_pool;
 	const struct target_core_fabric_ops *tfo;
+	struct se_sess_cmd_queue *cq;
 	struct se_sess_cmd_queue *sq;
 	int			q_cnt;
 };
-- 
2.25.1


  parent reply	other threads:[~2021-02-04 11:38 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-02-04 11:35 [PATCH 00/11] target: fix cmd plugging and completion Mike Christie
2021-02-04 11:35 ` [PATCH 01/11] target: pass in fabric ops to session creation Mike Christie
2021-02-04 11:35 ` [PATCH 02/11] target: add workqueue cmd submission helper Mike Christie
2021-02-04 23:13   ` Chaitanya Kulkarni
2021-02-05  0:43     ` michael.christie
2021-02-05  1:50       ` Chaitanya Kulkarni
2021-02-04 11:35 ` [PATCH 03/11] tcm loop: use blk cmd allocator for se_cmds Mike Christie
2021-02-04 11:35 ` [PATCH 04/11] tcm loop: use lio wq cmd submission helper Mike Christie
2021-02-04 11:35 ` [PATCH 05/11] vhost scsi: " Mike Christie
2021-02-05 16:17   ` Michael S. Tsirkin
2021-02-05 16:17     ` Michael S. Tsirkin
2021-02-05 17:38     ` Mike Christie
2021-02-05 18:04       ` Mike Christie
2021-02-04 11:35 ` [PATCH 06/11] target: cleanup cmd flag bits Mike Christie
2021-02-04 23:15   ` Chaitanya Kulkarni
2021-02-04 11:35 ` [PATCH 07/11] target: fix backend plugging Mike Christie
2021-02-04 11:35 ` [PATCH 08/11] target iblock: add backend plug/unplug callouts Mike Christie
2021-02-04 23:23   ` Chaitanya Kulkarni
2021-02-05  0:45     ` michael.christie
2021-02-07  1:06   ` Chaitanya Kulkarni
2021-02-07  2:21     ` Bart Van Assche
2021-02-07  2:21       ` Bart Van Assche
2021-02-04 11:35 ` [PATCH 09/11] target_core_user: " Mike Christie
2021-02-04 23:25   ` Chaitanya Kulkarni
2021-02-07 21:37     ` Mike Christie
2021-02-04 11:35 ` Mike Christie [this message]
2021-02-04 23:26   ` [PATCH 10/11] target: replace work per cmd in completion path Chaitanya Kulkarni
2021-02-04 11:35 ` [PATCH 11/11] target, vhost-scsi: don't switch cpus on completion Mike Christie
2021-02-08 10:48 ` [PATCH 00/11] target: fix cmd plugging and completion Stefan Hajnoczi
2021-02-08 10:48   ` Stefan Hajnoczi
2021-02-08 12:01 ` Michael S. Tsirkin
2021-02-08 12:01   ` Michael S. Tsirkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210204113513.93204-11-michael.christie@oracle.com \
    --to=michael.christie@oracle.com \
    --cc=jasowang@redhat.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    --cc=mst@redhat.com \
    --cc=stefanha@redhat.com \
    --cc=target-devel@vger.kernel.org \
    --cc=virtualization@lists.linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.