All of lore.kernel.org
 help / color / mirror / Atom feed
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
To: Nilesh Javali <njavali@marvell.com>,
	GR-QLogic-Storage-Upstream@marvell.com,
	"James E.J. Bottomley" <jejb@linux.ibm.com>,
	"Martin K. Petersen" <martin.petersen@oracle.com>
Cc: linux-scsi@vger.kernel.org
Subject: [PATCH 2/2] scsi: qla2xxx: avoid flush_scheduled_work() usage
Date: Thu, 23 Jun 2022 19:29:00 +0900	[thread overview]
Message-ID: <46629616-be04-9a53-b9d8-b2b947adf9d0@I-love.SAKURA.ne.jp> (raw)
In-Reply-To: <b4a96c07-76ef-c7ac-6a73-a64bba32d26f@I-love.SAKURA.ne.jp>

As commit c4f135d643823a86 ("workqueue: Wrap flush_workqueue() using
a macro") says, remove the flush_scheduled_work() call from the qla2xxx driver.

Although the qla2xxx driver calls schedule_{,delayed}_work() from 10
locations, I assume that flush_scheduled_work() from qlt_stop_phase1()
needs to flush only work items scheduled by qlt_sched_sess_work(), because
flush_scheduled_work() from qlt_stop_phase1() is called only when
"struct qla_tgt"->sess_works_list is not empty.

Currently qlt_stop_phase1() may fail to call flush_scheduled_work(), because
list_empty() may return true as soon as qlt_sess_work_fn() calls
list_del(). In order to close this race window while removing the
flush_scheduled_work() call, replace the sess_* fields in "struct qla_tgt"
with a dedicated workqueue and unconditionally call flush_workqueue().

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
This patch is only compile tested.

Changes in v2:
  Use per "struct qla_tgt" workqueue.

 drivers/scsi/qla2xxx/qla_dbg.c    |  2 +-
 drivers/scsi/qla2xxx/qla_target.c | 79 ++++++++++++-------------------
 drivers/scsi/qla2xxx/qla_target.h |  7 +--
 3 files changed, 35 insertions(+), 53 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 7cf1f78cbaee..35a17bb6512d 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -10,7 +10,7 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0199       |                |
+ * | Module Init and Probe        |       0x019a       |                |
  * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff	|
  * | Device Discovery             |       0x2134       | 0x210e-0x2115  |
  * |                              |                    | 0x211c-0x2128  |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index d9e0e7ffe130..d08e50779f73 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -126,6 +126,8 @@ static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
 static inline uint32_t qlt_make_handle(struct qla_qpair *);
 
+static void qlt_sess_work_fn(struct work_struct *work);
+
 /*
  * Global Variables
  */
@@ -1529,7 +1531,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 {
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = tgt->ha;
-	unsigned long flags;
 
 	mutex_lock(&ha->optrom_mutex);
 	mutex_lock(&qla_tgt_mutex);
@@ -1556,13 +1557,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
 	    "Waiting for sess works (tgt %p)", tgt);
-	spin_lock_irqsave(&tgt->sess_work_lock, flags);
-	while (!list_empty(&tgt->sess_works_list)) {
-		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
-		flush_scheduled_work();
-		spin_lock_irqsave(&tgt->sess_work_lock, flags);
-	}
-	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+	flush_workqueue(tgt->wq);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
 	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
@@ -1669,6 +1664,7 @@ static void qlt_release(struct qla_tgt *tgt)
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
 	    "Release of tgt %p finished\n", tgt);
 
+	destroy_workqueue(tgt->wq);
 	kfree(tgt);
 }
 
@@ -1677,7 +1673,6 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
 	const void *param, unsigned int param_size)
 {
 	struct qla_tgt_sess_work_param *prm;
-	unsigned long flags;
 
 	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
 	if (!prm) {
@@ -1695,11 +1690,9 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
 	prm->type = type;
 	memcpy(&prm->tm_iocb, param, param_size);
 
-	spin_lock_irqsave(&tgt->sess_work_lock, flags);
-	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
-	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
-
-	schedule_work(&tgt->sess_work);
+	prm->tgt = tgt;
+	INIT_WORK(&prm->work, qlt_sess_work_fn);
+	queue_work(tgt->wq, &prm->work);
 
 	return 0;
 }
@@ -6399,43 +6392,25 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 
 static void qlt_sess_work_fn(struct work_struct *work)
 {
-	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+	struct qla_tgt_sess_work_param *prm =
+		container_of(work, struct qla_tgt_sess_work_param, work);
+	struct qla_tgt *tgt = prm->tgt;
 	struct scsi_qla_host *vha = tgt->vha;
-	unsigned long flags;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
 
-	spin_lock_irqsave(&tgt->sess_work_lock, flags);
-	while (!list_empty(&tgt->sess_works_list)) {
-		struct qla_tgt_sess_work_param *prm = list_entry(
-		    tgt->sess_works_list.next, typeof(*prm),
-		    sess_works_list_entry);
-
-		/*
-		 * This work can be scheduled on several CPUs at time, so we
-		 * must delete the entry to eliminate double processing
-		 */
-		list_del(&prm->sess_works_list_entry);
-
-		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
-
-		switch (prm->type) {
-		case QLA_TGT_SESS_WORK_ABORT:
-			qlt_abort_work(tgt, prm);
-			break;
-		case QLA_TGT_SESS_WORK_TM:
-			qlt_tmr_work(tgt, prm);
-			break;
-		default:
-			BUG_ON(1);
-			break;
-		}
-
-		spin_lock_irqsave(&tgt->sess_work_lock, flags);
-
-		kfree(prm);
+	switch (prm->type) {
+	case QLA_TGT_SESS_WORK_ABORT:
+		qlt_abort_work(tgt, prm);
+		break;
+	case QLA_TGT_SESS_WORK_TM:
+		qlt_tmr_work(tgt, prm);
+		break;
+	default:
+		BUG_ON(1);
+		break;
 	}
-	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+	kfree(prm);
 }
 
 /* Must be called under tgt_host_action_mutex */
@@ -6465,11 +6440,19 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 		    "Unable to allocate struct qla_tgt\n");
 		return -ENOMEM;
 	}
+	tgt->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+	if (!tgt->wq) {
+		kfree(tgt);
+		ql_log(ql_log_warn, base_vha, 0x019a,
+		       "Unable to allocate workqueue.\n");
+		return -ENOMEM;
+	}
 
 	tgt->qphints = kcalloc(ha->max_qpairs + 1,
 			       sizeof(struct qla_qpair_hint),
 			       GFP_KERNEL);
 	if (!tgt->qphints) {
+		destroy_workqueue(tgt->wq);
 		kfree(tgt);
 		ql_log(ql_log_warn, base_vha, 0x0197,
 		    "Unable to allocate qpair hints.\n");
@@ -6482,6 +6465,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	rc = btree_init64(&tgt->lun_qpair_map);
 	if (rc) {
 		kfree(tgt->qphints);
+		destroy_workqueue(tgt->wq);
 		kfree(tgt);
 		ql_log(ql_log_info, base_vha, 0x0198,
 			"Unable to initialize lun_qpair_map btree\n");
@@ -6512,9 +6496,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	tgt->ha = ha;
 	tgt->vha = base_vha;
 	init_waitqueue_head(&tgt->waitQ);
-	spin_lock_init(&tgt->sess_work_lock);
-	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
-	INIT_LIST_HEAD(&tgt->sess_works_list);
 	atomic_set(&tgt->tgt_global_resets_count, 0);
 
 	base_vha->vha_tgt.qla_tgt = tgt;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index e899e13696e9..02fbffd8248a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -813,9 +813,8 @@ struct qla_tgt {
 	/* Count of sessions refering qla_tgt. Protected by hardware_lock. */
 	int sess_count;
 
-	spinlock_t sess_work_lock;
-	struct list_head sess_works_list;
-	struct work_struct sess_work;
+	/* Workqueue for processing session work. */
+	struct workqueue_struct *wq;
 
 	struct imm_ntfy_from_isp link_reinit_iocb;
 	wait_queue_head_t waitQ;
@@ -950,6 +949,8 @@ struct qla_tgt_sess_work_param {
 		struct imm_ntfy_from_isp tm_iocb;
 		struct atio_from_isp tm_iocb2;
 	};
+	struct qla_tgt *tgt;
+	struct work_struct work;
 };
 
 struct qla_tgt_mgmt_cmd {
-- 
2.18.4


  parent reply	other threads:[~2022-06-23 10:29 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-06-09 11:57 [PATCH] scsi: qla2xxx: avoid flush_scheduled_work() usage Tetsuo Handa
2022-06-23 10:27 ` [PATCH 1/2] scsi: qla2xxx: Remove unused del_sess_list field Tetsuo Handa
2022-06-23 10:29 ` Tetsuo Handa [this message]
2022-07-02  5:00   ` [PATCH v3 1/4] " Tetsuo Handa
2022-07-02 16:19     ` Bart Van Assche
2022-07-02  5:00   ` [PATCH v3 2/4] scsi: qla2xxx: Remove unused qlt_tmr_work() Tetsuo Handa
2022-07-02  5:00   ` [PATCH v3 3/4] scsi: qla2xxx: always wait for qlt_sess_work_fn() from qlt_stop_phase1() Tetsuo Handa
2022-07-02  5:00   ` [PATCH v3 4/4] scsi: qla2xxx: avoid flush_scheduled_work() usage Tetsuo Handa

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=46629616-be04-9a53-b9d8-b2b947adf9d0@I-love.SAKURA.ne.jp \
    --to=penguin-kernel@i-love.sakura.ne.jp \
    --cc=GR-QLogic-Storage-Upstream@marvell.com \
    --cc=jejb@linux.ibm.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    --cc=njavali@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.