All of lore.kernel.org
 help / color / mirror / Atom feed
From: Douglas Gilbert <dgilbert@interlog.com>
To: linux-scsi@vger.kernel.org
Cc: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com,
	hare@suse.de, bvanassche@acm.org
Subject: [PATCH v25 10/44] sg: change rwlock to spinlock
Date: Sun, 23 Oct 2022 23:20:24 -0400	[thread overview]
Message-ID: <20221024032058.14077-11-dgilbert@interlog.com> (raw)
In-Reply-To: <20221024032058.14077-1-dgilbert@interlog.com>

A reviewer suggested that the extra overhead associated with a
rw lock compared to a spinlock was not worth it for short,
oft-used critical sections.

So the rwlock on the request list/array is changed to a spinlock.
The head of that list is in the owning sg file descriptor object.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
---
 drivers/scsi/sg.c | 58 +++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 13d9c69494d1..0665e61d448c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -144,7 +144,7 @@ struct sg_fd {		/* holds the state of a file descriptor */
 	struct list_head sfd_entry;	/* member sg_device::sfds list */
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
-	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
+	spinlock_t rq_list_lock;	/* protect access to list in req_arr */
 	struct mutex f_mutex;	/* protect against changes in this fd */
 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
@@ -815,9 +815,9 @@ srp_done(struct sg_fd *sfp, struct sg_request *srp)
 	unsigned long flags;
 	int ret;
 
-	read_lock_irqsave(&sfp->rq_list_lock, flags);
+	spin_lock_irqsave(&sfp->rq_list_lock, flags);
 	ret = srp->done;
-	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
 	return ret;
 }
 
@@ -1032,15 +1032,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 			return result;
 		result = wait_event_interruptible(sfp->read_wait,
 			srp_done(sfp, srp));
-		write_lock_irq(&sfp->rq_list_lock);
+		spin_lock_irq(&sfp->rq_list_lock);
 		if (srp->done) {
 			srp->done = 2;
-			write_unlock_irq(&sfp->rq_list_lock);
+			spin_unlock_irq(&sfp->rq_list_lock);
 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
 			return (result < 0) ? result : 0;
 		}
 		srp->orphan = 1;
-		write_unlock_irq(&sfp->rq_list_lock);
+		spin_unlock_irq(&sfp->rq_list_lock);
 		return result;	/* -ERESTARTSYS because signal hit process */
 	case SG_SET_TIMEOUT:
 		result = get_user(val, ip);
@@ -1092,15 +1092,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 		sfp->force_packid = val ? 1 : 0;
 		return 0;
 	case SG_GET_PACK_ID:
-		read_lock_irqsave(&sfp->rq_list_lock, iflags);
+		spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 		list_for_each_entry(srp, &sfp->rq_list, entry) {
 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
-				read_unlock_irqrestore(&sfp->rq_list_lock,
+				spin_unlock_irqrestore(&sfp->rq_list_lock,
 						       iflags);
 				return put_user(srp->header.pack_id, ip);
 			}
 		}
-		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+		spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 		return put_user(-1, ip);
 	case SG_GET_NUM_WAITING:
 		return put_user(atomic_read(&sfp->waiting), ip);
@@ -1169,9 +1169,9 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 					GFP_KERNEL);
 			if (!rinfo)
 				return -ENOMEM;
-			read_lock_irqsave(&sfp->rq_list_lock, iflags);
+			spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 			sg_fill_request_table(sfp, rinfo);
-			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	#ifdef CONFIG_COMPAT
 			if (in_compat_syscall())
 				result = put_compat_request_table(p, rinfo);
@@ -1466,7 +1466,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 	srp->rq = NULL;
 	blk_mq_free_request(rq);
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
 			srp->sg_io_owned = 0;
@@ -1474,7 +1474,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 			done = 0;
 	}
 	srp->done = done;
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (likely(done)) {
 		/* Now wake up any sg_read() that is waiting for this
@@ -2129,7 +2129,7 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id, bool *busy)
 	unsigned long iflags;
 
 	*busy = false;
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	list_for_each_entry(resp, &sfp->rq_list, entry) {
 		/* look for requests that are not SG_IO owned */
 		if ((!resp->sg_io_owned) &&
@@ -2140,14 +2140,14 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id, bool *busy)
 				break;
 			case 1: /* request done; response ready to return */
 				resp->done = 2;	/* guard against other readers */
-				write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+				spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 				return resp;
 			case 2: /* response already being returned */
 				break;
 			}
 		}
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -2159,7 +2159,7 @@ sg_setup_req(struct sg_fd *sfp)
 	unsigned long iflags;
 	struct sg_request *rp = sfp->req_arr;
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&sfp->rq_list)) {
 		if (!sfp->cmd_q)
 			goto out_unlock;
@@ -2175,10 +2175,10 @@ sg_setup_req(struct sg_fd *sfp)
 	rp->parentfp = sfp;
 	rp->header.duration = jiffies_to_msecs(jiffies);
 	list_add_tail(&rp->entry, &sfp->rq_list);
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return rp;
 out_unlock:
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -2191,13 +2191,13 @@ sg_remove_request(struct sg_fd *sfp, struct sg_request *srp)
 
 	if (!sfp || !srp || list_empty(&sfp->rq_list))
 		return res;
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&srp->entry)) {
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 		res = 1;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	/*
 	 * If the device is detaching, wakeup any readers in case we just
@@ -2222,7 +2222,7 @@ sg_add_sfp(struct sg_device *sdp)
 		return ERR_PTR(-ENOMEM);
 
 	init_waitqueue_head(&sfp->read_wait);
-	rwlock_init(&sfp->rq_list_lock);
+	spin_lock_init(&sfp->rq_list_lock);
 	INIT_LIST_HEAD(&sfp->rq_list);
 	kref_init(&sfp->f_ref);
 	mutex_init(&sfp->f_mutex);
@@ -2267,14 +2267,14 @@ sg_remove_sfp_usercontext(struct work_struct *work)
 	unsigned long iflags;
 
 	/* Cleanup any responses which were never read(). */
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	while (!list_empty(&sfp->rq_list)) {
 		srp = list_first_entry(&sfp->rq_list, struct sg_request, entry);
 		sg_finish_scsi_blk_rq(srp);
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (sfp->reserve.buflen > 0) {
 		SG_LOG(6, sfp, "%s:    buflen=%d, num_sgat=%d\n", __func__,
@@ -2556,9 +2556,9 @@ sg_proc_seq_show_dev(struct seq_file *s, void *v)
 			      scsidp->host->host_no, scsidp->channel,
 			      scsidp->id, scsidp->lun, (int) scsidp->type,
 			      1,
-			      (int) scsidp->queue_depth,
-			      (int) scsi_device_busy(scsidp),
-			      (int) scsi_device_online(scsidp));
+			      (int)scsidp->queue_depth,
+			      (int)scsi_device_busy(scsidp),
+			      (int)scsi_device_online(scsidp));
 	}
 	read_unlock_irqrestore(&sg_index_lock, iflags);
 	return 0;
@@ -2598,7 +2598,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 	k = 0;
 	list_for_each_entry(fp, &sdp->sfds, sfd_entry) {
 		k++;
-		read_lock(&fp->rq_list_lock); /* irqs already disabled */
+		spin_lock(&fp->rq_list_lock); /* irqs already disabled */
 		seq_printf(s, "   FD(%d): timeout=%dms buflen=%d (res)sgat=%d\n",
 			   k, jiffies_to_msecs(fp->timeout),
 			   fp->reserve.buflen,
@@ -2646,7 +2646,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 		}
 		if (list_empty(&fp->rq_list))
 			seq_puts(s, "     No requests active\n");
-		read_unlock(&fp->rq_list_lock);
+		spin_unlock(&fp->rq_list_lock);
 	}
 }
 
-- 
2.37.3


  parent reply	other threads:[~2022-10-24  3:24 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-10-24  3:20 [PATCH v25 00/44] sg: add v4 interface Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 01/44] sg: move functions around Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 02/44] sg: remove typedefs, type+formatting cleanup Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 03/44] sg: sg_log and is_enabled Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 04/44] sg: remove typedefs, type+formatting cleanup Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 05/44] sg: bitops in sg_device Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 06/44] sg: make open count an atomic Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 07/44] sg: move header to uapi section Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 08/44] sg: speed sg_poll and sg_get_num_waiting Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 09/44] sg: sg_allow_if_err_recovery and renames Douglas Gilbert
2022-10-24  3:20 ` Douglas Gilbert [this message]
2022-10-24  3:20 ` [PATCH v25 11/44] sg: ioctl handling Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 12/44] sg: split sg_read Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 13/44] sg: sg_common_write add structure for arguments Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 14/44] sg: rework sg_vma_fault Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 15/44] sg: rework sg_mmap Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 16/44] sg: replace sg_allow_access Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 17/44] sg: rework scatter gather handling Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 18/44] sg: introduce request state machine Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 19/44] sg: sg_find_srp_by_id Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 20/44] sg: sg_fill_request_element Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 21/44] sg: printk change %p to %pK Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 22/44] sg: xarray for fds in device Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 23/44] sg: xarray for reqs in fd Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 24/44] sg: replace rq array with xarray Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 25/44] sg: sense buffer rework Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 26/44] sg: add sg v4 interface support Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 27/44] sg: rework debug info Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 28/44] sg: add 8 byte SCSI LUN to sg_scsi_id Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 29/44] sg: expand sg_comm_wr_t Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 30/44] sg: add sg_iosubmit_v3 and sg_ioreceive_v3 ioctls Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 31/44] sg: move procfs objects to avoid forward decls Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 32/44] sg: protect multiple receivers Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 33/44] sg: first debugfs support Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 34/44] sg: rework mmap support Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 35/44] sg: defang allow_dio Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 36/44] sg: warn v3 write system call users Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 37/44] sg: add mmap_sz tracking Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 38/44] sg: track lowest inactive and await indexes Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 39/44] sg: remove unit attention check for device changed Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 40/44] sg: no_dxfer: move to/from kernel buffers Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 41/44] sg: add bio_poll support Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 42/44] sg: add statistics similar to st Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 43/44] sg: rework command completion when removed device Douglas Gilbert
2022-10-24  3:20 ` [PATCH v25 44/44] sg: bump version to 4.0.14 Douglas Gilbert

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221024032058.14077-11-dgilbert@interlog.com \
    --to=dgilbert@interlog.com \
    --cc=bvanassche@acm.org \
    --cc=hare@suse.de \
    --cc=jejb@linux.vnet.ibm.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.