From: Douglas Gilbert <dgilbert@interlog.com>
To: linux-scsi@vger.kernel.org
Cc: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com, hare@suse.de
Subject: [PATCH v5 12/23] sg: change rwlock to spinlock
Date: Tue,  8 Oct 2019 09:50:11 +0200
Message-ID: <20191008075022.30055-13-dgilbert@interlog.com>
In-Reply-To: <20191008075022.30055-1-dgilbert@interlog.com>

A reviewer suggested that the extra overhead associated with a
rwlock, compared to a spinlock, is not worth it for short,
frequently executed critical sections.

So the rwlock protecting the request list/array is changed to a
spinlock. The head of that list is in the owning sg file descriptor
object (struct sg_fd).
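
For illustration only (not part of the patch): a minimal sketch of the
locking pattern this change settles on. The structs below are cut down
to the members used here, and example_init()/example_count_requests()
are hypothetical helpers, but the lock and list member names match the
diff below.

  #include <linux/spinlock.h>
  #include <linux/list.h>

  struct sg_request {                     /* reduced for illustration */
          struct list_head entry;         /* member of sg_fd::rq_list */
  };

  struct sg_fd {                          /* reduced for illustration */
          spinlock_t rq_list_lock;        /* protects rq_list; was an rwlock */
          struct list_head rq_list;       /* head of outstanding requests */
  };

  static void example_init(struct sg_fd *sfp)
  {
          spin_lock_init(&sfp->rq_list_lock);   /* replaces rwlock_init() */
          INIT_LIST_HEAD(&sfp->rq_list);
  }

  static int example_count_requests(struct sg_fd *sfp)
  {
          struct sg_request *srp;
          unsigned long iflags;
          int n = 0;

          /* One IRQ-safe lock for readers and writers alike; the critical
           * section is short, so the rwlock's extra bookkeeping buys nothing.
           */
          spin_lock_irqsave(&sfp->rq_list_lock, iflags);
          list_for_each_entry(srp, &sfp->rq_list, entry)
                  n++;
          spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
          return n;
  }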

Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
---
 drivers/scsi/sg.c | 52 +++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d28278a30ced..2796fef42837 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -139,7 +139,7 @@ struct sg_fd {		/* holds the state of a file descriptor */
 	struct list_head sfd_entry;	/* member sg_device::sfds list */
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
-	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
+	spinlock_t rq_list_lock;	/* protect access to list in req_arr */
 	struct mutex f_mutex;	/* protect against changes in this fd */
 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
@@ -742,17 +742,17 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id)
 	struct sg_request *resp;
 	unsigned long iflags;
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	list_for_each_entry(resp, &sfp->rq_list, entry) {
 		/* look for requests that are ready + not SG_IO owned */
 		if ((resp->done == 1) && (!resp->sg_io_owned) &&
 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
 			resp->done = 2;	/* guard against other readers */
-			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 			return resp;
 		}
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -804,9 +804,9 @@ srp_done(struct sg_fd *sfp, struct sg_request *srp)
 	unsigned long flags;
 	int ret;
 
-	read_lock_irqsave(&sfp->rq_list_lock, flags);
+	spin_lock_irqsave(&sfp->rq_list_lock, flags);
 	ret = srp->done;
-	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
 	return ret;
 }
 
@@ -1045,15 +1045,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			(srp_done(sfp, srp) || SG_IS_DETACHING(sdp)));
 		if (SG_IS_DETACHING(sdp))
 			return -ENODEV;
-		write_lock_irq(&sfp->rq_list_lock);
+		spin_lock_irq(&sfp->rq_list_lock);
 		if (srp->done) {
 			srp->done = 2;
-			write_unlock_irq(&sfp->rq_list_lock);
+			spin_unlock_irq(&sfp->rq_list_lock);
 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
 			return (result < 0) ? result : 0;
 		}
 		srp->orphan = 1;
-		write_unlock_irq(&sfp->rq_list_lock);
+		spin_unlock_irq(&sfp->rq_list_lock);
 		return result;	/* -ERESTARTSYS because signal hit process */
 	case SG_SET_TIMEOUT:
 		result = get_user(val, ip);
@@ -1105,16 +1105,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		sfp->force_packid = val ? 1 : 0;
 		return 0;
 	case SG_GET_PACK_ID:
-		read_lock_irqsave(&sfp->rq_list_lock, iflags);
+		spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 		list_for_each_entry(srp, &sfp->rq_list, entry) {
 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
-				read_unlock_irqrestore(&sfp->rq_list_lock,
+				spin_unlock_irqrestore(&sfp->rq_list_lock,
 						       iflags);
 				put_user(srp->header.pack_id, ip);
 				return 0;
 			}
 		}
-		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+		spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 		put_user(-1, ip);
 		return 0;
 	case SG_GET_NUM_WAITING:
@@ -1184,9 +1184,9 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 					GFP_KERNEL);
 			if (!rinfo)
 				return -ENOMEM;
-			read_lock_irqsave(&sfp->rq_list_lock, iflags);
+			spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 			sg_fill_request_table(sfp, rinfo);
-			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 			result = copy_to_user(p, rinfo,
 					      SZ_SG_REQ_INFO * SG_MAX_QUEUE);
 			result = result ? -EFAULT : 0;
@@ -1484,7 +1484,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 	scsi_req_free_cmd(scsi_req(rq));
 	blk_put_request(rq);
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
 			srp->sg_io_owned = 0;
@@ -1492,7 +1492,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 			done = 0;
 	}
 	srp->done = done;
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (likely(done)) {
 		/* Now wake up any sg_read() that is waiting for this
@@ -2166,7 +2166,7 @@ sg_add_request(struct sg_fd *sfp)
 	unsigned long iflags;
 	struct sg_request *rp = sfp->req_arr;
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&sfp->rq_list)) {
 		if (!sfp->cmd_q)
 			goto out_unlock;
@@ -2182,10 +2182,10 @@ sg_add_request(struct sg_fd *sfp)
 	rp->parentfp = sfp;
 	rp->header.duration = jiffies_to_msecs(jiffies);
 	list_add_tail(&rp->entry, &sfp->rq_list);
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return rp;
 out_unlock:
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -2198,13 +2198,13 @@ sg_remove_request(struct sg_fd *sfp, struct sg_request *srp)
 
 	if (!sfp || !srp || list_empty(&sfp->rq_list))
 		return res;
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&srp->entry)) {
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 		res = 1;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return res;
 }
 
@@ -2220,7 +2220,7 @@ sg_add_sfp(struct sg_device *sdp)
 		return ERR_PTR(-ENOMEM);
 
 	init_waitqueue_head(&sfp->read_wait);
-	rwlock_init(&sfp->rq_list_lock);
+	spin_lock_init(&sfp->rq_list_lock);
 	INIT_LIST_HEAD(&sfp->rq_list);
 	kref_init(&sfp->f_ref);
 	mutex_init(&sfp->f_mutex);
@@ -2265,14 +2265,14 @@ sg_remove_sfp_usercontext(struct work_struct *work)
 	unsigned long iflags;
 
 	/* Cleanup any responses which were never read(). */
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	while (!list_empty(&sfp->rq_list)) {
 		srp = list_first_entry(&sfp->rq_list, struct sg_request, entry);
 		sg_finish_scsi_blk_rq(srp);
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (sfp->reserve.buflen > 0) {
 		SG_LOG(6, sfp, "%s:    buflen=%d, num_sgat=%d\n", __func__,
@@ -2596,7 +2596,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 	k = 0;
 	list_for_each_entry(fp, &sdp->sfds, sfd_entry) {
 		k++;
-		read_lock(&fp->rq_list_lock); /* irqs already disabled */
+		spin_lock(&fp->rq_list_lock); /* irqs already disabled */
 		seq_printf(s, "   FD(%d): timeout=%dms buflen=%d "
 			   "(res)sgat=%d low_dma=%d\n", k,
 			   jiffies_to_msecs(fp->timeout),
@@ -2646,7 +2646,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 		}
 		if (list_empty(&fp->rq_list))
 			seq_puts(s, "     No requests active\n");
-		read_unlock(&fp->rq_list_lock);
+		spin_unlock(&fp->rq_list_lock);
 	}
 }
 
-- 
2.23.0

