From: John Garry <john.g.garry@oracle.com>
To: jejb@linux.ibm.com, martin.petersen@oracle.com, dgilbert@interlog.com
Cc: linux-scsi@vger.kernel.org, linux-kernel@vger.kernel.org,
	bvanassche@acm.org, John Garry <john.g.garry@oracle.com>
Subject: [PATCH v2 04/11] scsi: scsi_debug: Protect block_unblock_all_queues() with mutex
Date: Thu, 23 Mar 2023 09:45:48 +0000	[thread overview]
Message-ID: <20230323094555.584624-5-john.g.garry@oracle.com> (raw)
In-Reply-To: <20230323094555.584624-1-john.g.garry@oracle.com>

There is no reason why calls to block_unblock_all_queues() from different
contexts can't race with one another, so protect them with
sdebug_host_list_mutex. There is no need for more fine-grained per-shost
locking here (and we don't have a per-shost lock anyway).

Also simplify the code touched in sdebug_change_qdepth().
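
For reference, the call-site pattern this patch establishes around each
caller of block_unblock_all_queues() looks roughly like the sketch below
(drawn from the hunks that follow; illustrative only, not meant to
compile on its own):

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);
	/* ... update shared scsi_debug state (delays, queue depth, ...) ... */
	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

block_unblock_all_queues() itself now asserts that the mutex is held via
lockdep_assert_held(&sdebug_host_list_mutex).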

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/scsi/scsi_debug.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a61e7c31dab5..cd05e2f87417 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5497,6 +5497,8 @@ static void block_unblock_all_queues(bool block)
 	int j;
 	struct sdebug_queue *sqp;
 
+	lockdep_assert_held(&sdebug_host_list_mutex);
+
 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
 		atomic_set(&sqp->blocked, (int)block);
 }
@@ -5511,10 +5513,13 @@ static void tweak_cmnd_count(void)
 	modulo = abs(sdebug_every_nth);
 	if (modulo < 2)
 		return;
+
+	mutex_lock(&sdebug_host_list_mutex);
 	block_unblock_all_queues(true);
 	count = atomic_read(&sdebug_cmnd_count);
 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
 	block_unblock_all_queues(false);
+	mutex_unlock(&sdebug_host_list_mutex);
 }
 
 static void clear_queue_stats(void)
@@ -6036,6 +6041,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
 			int j, k;
 			struct sdebug_queue *sqp;
 
+			mutex_lock(&sdebug_host_list_mutex);
 			block_unblock_all_queues(true);
 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
 			     ++j, ++sqp) {
@@ -6051,6 +6057,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
 				sdebug_ndelay = 0;
 			}
 			block_unblock_all_queues(false);
+			mutex_unlock(&sdebug_host_list_mutex);
 		}
 		return res;
 	}
@@ -6076,6 +6083,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
 			int j, k;
 			struct sdebug_queue *sqp;
 
+			mutex_lock(&sdebug_host_list_mutex);
 			block_unblock_all_queues(true);
 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
 			     ++j, ++sqp) {
@@ -6092,6 +6100,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
 							: DEF_JDELAY;
 			}
 			block_unblock_all_queues(false);
+			mutex_unlock(&sdebug_host_list_mutex);
 		}
 		return res;
 	}
@@ -6405,6 +6414,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
 	    (n <= SDEBUG_CANQUEUE) &&
 	    (sdebug_host_max_queue == 0)) {
+		mutex_lock(&sdebug_host_list_mutex);
 		block_unblock_all_queues(true);
 		k = 0;
 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
@@ -6421,6 +6431,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
 		else
 			atomic_set(&retired_max_queue, 0);
 		block_unblock_all_queues(false);
+		mutex_unlock(&sdebug_host_list_mutex);
 		return count;
 	}
 	return -EINVAL;
@@ -7352,7 +7363,9 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 	if (!devip)
 		return	-ENODEV;
 
+	mutex_lock(&sdebug_host_list_mutex);
 	block_unblock_all_queues(true);
+
 	if (qdepth > SDEBUG_CANQUEUE) {
 		qdepth = SDEBUG_CANQUEUE;
 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
@@ -7363,9 +7376,12 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 	if (qdepth != sdev->queue_depth)
 		scsi_change_queue_depth(sdev, qdepth);
 
+	block_unblock_all_queues(false);
+	mutex_unlock(&sdebug_host_list_mutex);
+
 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
-	block_unblock_all_queues(false);
+
 	return sdev->queue_depth;
 }
 
-- 
2.35.3


Thread overview: 14+ messages
2023-03-23  9:45 [PATCH v2 00/11] scsi_debug: Fix shost command overloading issues John Garry
2023-03-23  9:45 ` [PATCH v2 01/11] scsi: scsi_debug: Fix check for sdev queue full John Garry
2023-03-23  9:45 ` [PATCH v2 02/11] scsi: scsi_debug: Don't iter all shosts in clear_luns_changed_on_target() John Garry
2023-03-23  9:45 ` [PATCH v2 03/11] scsi: scsi_debug: Change shost list lock to a mutex John Garry
2023-03-23  9:45 ` John Garry [this message]
2023-03-23  9:45 ` [PATCH v2 05/11] scsi: scsi_debug: Use scsi_block_requests() to block queues John Garry
2023-03-23  9:45 ` [PATCH v2 06/11] scsi: scsi_debug: Dynamically allocate sdebug_queued_cmd John Garry
2023-03-23 13:46   ` kernel test robot
2023-03-23 18:13   ` kernel test robot
2023-03-23  9:45 ` [PATCH v2 07/11] scsi: scsi_debug: Use blk_mq_tagset_busy_iter() in sdebug_blk_mq_poll() John Garry
2023-03-23  9:45 ` [PATCH v2 08/11] scsi: scsi_debug: Use blk_mq_tagset_busy_iter() in stop_all_queued() John Garry
2023-03-23  9:45 ` [PATCH v2 09/11] scsi: scsi_debug: Use scsi_host_busy() in delay_store() and ndelay_store() John Garry
2023-03-23  9:45 ` [PATCH v2 10/11] scsi: scsi_debug: Only allow sdebug_max_queue be modified when no shosts John Garry
2023-03-23  9:45 ` [PATCH v2 11/11] scsi: scsi_debug: Drop sdebug_queue John Garry
