linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* io_request_lock/queue_lock patch
@ 2001-08-30 20:49 Jonathan Lahr
  2001-08-30 21:32 ` Gérard Roudier
  2001-08-31  5:56 ` Jens Axboe
  0 siblings, 2 replies; 14+ messages in thread
From: Jonathan Lahr @ 2001-08-30 20:49 UTC (permalink / raw)
  To: linux-kernel, linux-scsi


Included below is a snapshot of a patch I am developing to reduce 
io_request_lock contention in 2.4.  

This patch changes __make_request to take the device-specific queue_lock 
instead of io_request_lock and changes generic_unplug_device to take
queue_lock in addition to io_request_lock to serialize access to the 
request queue.  Also, locking in various functions has been modified 
in accordance with this new scheme.

I have done some testing of this with aic7xxx, qla2x00, and lpfc adapters.

To apply it to 2.4.6 or 2.4.7, from linux/ "patch -p1 < iorlv0_247".

Comments and suggestions are welcome and invited.

Jonathan

----- patch -----
diff -Naur linux.base/drivers/block/ll_rw_blk.c linux.mod/drivers/block/ll_rw_blk.c
--- linux.base/drivers/block/ll_rw_blk.c	Thu Jul 19 20:51:23 2001
+++ linux.mod/drivers/block/ll_rw_blk.c	Fri Aug 24 10:17:38 2001
@@ -343,11 +343,13 @@
 void generic_unplug_device(void *data)
 {
 	request_queue_t *q = (request_queue_t *) data;
-	unsigned long flags;
+	unsigned long flags[2];
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&io_request_lock, flags[0]);
+	spin_lock_irqsave(&q->queue_lock, flags[1]);
 	__generic_unplug_device(q);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
+	spin_unlock_irqrestore(&io_request_lock, flags[0]);
 }
 
 static void blk_init_free_list(request_queue_t *q)
@@ -470,9 +472,9 @@
 	add_wait_queue_exclusive(&q->wait_for_request, &wait);
 	for (;;) {
 		__set_current_state(TASK_UNINTERRUPTIBLE);
-		spin_lock_irq(&io_request_lock);
+		spin_lock_irq(&q->queue_lock);
 		rq = get_request(q, rw);
-		spin_unlock_irq(&io_request_lock);
+		spin_unlock_irq(&q->queue_lock);
 		if (rq)
 			break;
 		generic_unplug_device(q);
@@ -487,9 +489,9 @@
 {
 	register struct request *rq;
 
-	spin_lock_irq(&io_request_lock);
+	spin_lock_irq(&q->queue_lock);
 	rq = get_request(q, rw);
-	spin_unlock_irq(&io_request_lock);
+	spin_unlock_irq(&q->queue_lock);
 	if (rq)
 		return rq;
 	return __get_request_wait(q, rw);
@@ -555,7 +557,7 @@
 	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
 
 	if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
-		spin_unlock_irq(&io_request_lock);
+		spin_unlock_irq(&q->queue_lock);
 		BUG();
 	}
 
@@ -727,7 +729,7 @@
 	 * Now we acquire the request spinlock, we have to be mega careful
 	 * not to schedule or do something nonatomic
 	 */
-	spin_lock_irq(&io_request_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	insert_here = head->prev;
 	if (list_empty(head)) {
@@ -794,7 +796,7 @@
 		req = freereq;
 		freereq = NULL;
 	} else if ((req = get_request(q, rw)) == NULL) {
-		spin_unlock_irq(&io_request_lock);
+		spin_unlock_irq(&q->queue_lock);
 		if (rw_ahead)
 			goto end_io;
 
@@ -821,7 +823,7 @@
 out:
 	if (freereq)
 		blkdev_release_request(freereq);
-	spin_unlock_irq(&io_request_lock);
+	spin_unlock_irq(&q->queue_lock);
 	return 0;
 end_io:
 	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
diff -Naur linux.base/drivers/scsi/scsi.c linux.mod/drivers/scsi/scsi.c
--- linux.base/drivers/scsi/scsi.c	Thu Jul 19 21:07:04 2001
+++ linux.mod/drivers/scsi/scsi.c	Fri Aug 24 11:06:25 2001
@@ -678,41 +678,33 @@
 		 * passes a meaningful return value.
 		 */
 		if (host->hostt->use_new_eh_code) {
-                        spin_lock_irqsave(&io_request_lock, flags);
 			rtn = host->hostt->queuecommand(SCpnt, scsi_done);
-                        spin_unlock_irqrestore(&io_request_lock, flags);
 			if (rtn != 0) {
 				scsi_delete_timer(SCpnt);
 				scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
                                 SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));                                
 			}
 		} else {
-                        spin_lock_irqsave(&io_request_lock, flags);
 			host->hostt->queuecommand(SCpnt, scsi_old_done);
-                        spin_unlock_irqrestore(&io_request_lock, flags);
 		}
 	} else {
 		int temp;
 
 		SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
-                spin_lock_irqsave(&io_request_lock, flags);
 		temp = host->hostt->command(SCpnt);
 		SCpnt->result = temp;
 #ifdef DEBUG_DELAY
-                spin_unlock_irqrestore(&io_request_lock, flags);
 		clock = jiffies + 4 * HZ;
 		while (time_before(jiffies, clock))
 			barrier();
 		printk("done(host = %d, result = %04x) : routine at %p\n",
 		       host->host_no, temp, host->hostt->command);
-                spin_lock_irqsave(&io_request_lock, flags);
 #endif
 		if (host->hostt->use_new_eh_code) {
 			scsi_done(SCpnt);
 		} else {
 			scsi_old_done(SCpnt);
 		}
-                spin_unlock_irqrestore(&io_request_lock, flags);
 	}
 	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
 	return rtn;
diff -Naur linux.base/drivers/scsi/scsi_lib.c linux.mod/drivers/scsi/scsi_lib.c
--- linux.base/drivers/scsi/scsi_lib.c	Thu Jul 19 20:48:04 2001
+++ linux.mod/drivers/scsi/scsi_lib.c	Fri Aug 24 10:19:50 2001
@@ -68,7 +68,7 @@
 static void __scsi_insert_special(request_queue_t *q, struct request *rq,
 				  void *data, int at_head)
 {
-	unsigned long flags;
+	unsigned long flags[2];
 
 	ASSERT_LOCK(&io_request_lock, 0);
 
@@ -84,7 +84,8 @@
 	 * head of the queue for things like a QUEUE_FULL message from a
 	 * device, or a host that is unable to accept a particular command.
 	 */
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&io_request_lock, flags[0]);
+	spin_lock_irqsave(&q->queue_lock, flags[1]);
 
 	if (at_head)
 		list_add(&rq->queue, &q->queue_head);
@@ -92,7 +93,8 @@
 		list_add_tail(&rq->queue, &q->queue_head);
 
 	q->request_fn(q);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
+	spin_unlock_irqrestore(&io_request_lock, flags[0]);
 }
 
 
@@ -246,13 +248,14 @@
 void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 {
 	int all_clear;
-	unsigned long flags;
+	unsigned long flags[2];
 	Scsi_Device *SDpnt;
 	struct Scsi_Host *SHpnt;
 
 	ASSERT_LOCK(&io_request_lock, 0);
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&io_request_lock, flags[0]);
+	spin_lock_irqsave(&q->queue_lock, flags[1]);
 	if (SCpnt != NULL) {
 
 		/*
@@ -328,7 +331,8 @@
 			SHpnt->some_device_starved = 0;
 		}
 	}
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
+	spin_unlock_irqrestore(&io_request_lock, flags[0]);
 }
 
 /*
@@ -821,6 +825,7 @@
 	struct Scsi_Device_Template *STpnt;
 
 	ASSERT_LOCK(&io_request_lock, 1);
+	ASSERT_LOCK(&q->queue_lock, 1);
 
 	SDpnt = (Scsi_Device *) q->queuedata;
 	if (!SDpnt) {
@@ -828,6 +833,7 @@
 	}
 	SHpnt = SDpnt->host;
 
+
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.
@@ -839,7 +845,7 @@
 		 * we need to check to see if the queue is plugged or not.
 		 */
 		if (SHpnt->in_recovery || q->plugged)
-			return;
+			break;
 
 		/*
 		 * If the device cannot accept another request, then quit.
@@ -887,9 +893,11 @@
 			 */
 			SDpnt->was_reset = 0;
 			if (SDpnt->removable && !in_interrupt()) {
+				spin_unlock_irq(&q->queue_lock);
 				spin_unlock_irq(&io_request_lock);
 				scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
 				spin_lock_irq(&io_request_lock);
+				spin_lock_irq(&q->queue_lock);
 				continue;
 			}
 		}
@@ -998,8 +1006,6 @@
 		 * another.  
 		 */
 		req = NULL;
-		spin_unlock_irq(&io_request_lock);
-
 		if (SCpnt->request.cmd != SPECIAL) {
 			/*
 			 * This will do a couple of things:
@@ -1028,7 +1034,6 @@
 				{
 					panic("Should not have leftover blocks\n");
 				}
-				spin_lock_irq(&io_request_lock);
 				SHpnt->host_busy--;
 				SDpnt->device_busy--;
 				continue;
@@ -1044,7 +1049,6 @@
 				{
 					panic("Should not have leftover blocks\n");
 				}
-				spin_lock_irq(&io_request_lock);
 				SHpnt->host_busy--;
 				SDpnt->device_busy--;
 				continue;
@@ -1065,7 +1069,6 @@
 		 * Now we need to grab the lock again.  We are about to mess
 		 * with the request queue and try to find another command.
 		 */
-		spin_lock_irq(&io_request_lock);
 	}
 }
 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-30 20:49 io_request_lock/queue_lock patch Jonathan Lahr
@ 2001-08-30 21:32 ` Gérard Roudier
  2001-08-30 21:47   ` Eric Youngdale
  2001-08-31  5:56 ` Jens Axboe
  1 sibling, 1 reply; 14+ messages in thread
From: Gérard Roudier @ 2001-08-30 21:32 UTC (permalink / raw)
  To: Jonathan Lahr; +Cc: linux-kernel, linux-scsi


Here are my welcome comments. :)

In my opinion, it would be a miracle if your patch does not introduce
new race conditions, at least for drivers that still use the old scsi done
method.

  Gérard.

On Thu, 30 Aug 2001, Jonathan Lahr wrote:

> Included below is a snapshot of a patch I am developing to reduce
> io_request_lock contention in 2.4.
>
> This patch changes __make_request to take the device-specific queue_lock
> instead of io_request_lock and changes generic_unplug_device to take
> queue_lock in addition to io_request_lock to serialize access to the
> request queue.  Also, locking in various functions has been modified
> in accordance with this new scheme.
>
> I have done some testing of this with aic7xxx, qla2x00, and lpfc adapters.
>
> To apply it to 2.4.6 or 2.4.7, from linux/ "patch -p1 < iorlv0_247".
>
> Comments and suggestions are welcome and invited.
>
> Jonathan
>
> ----- patch -----
> diff -Naur linux.base/drivers/block/ll_rw_blk.c linux.mod/drivers/block/ll_rw_blk.c
> --- linux.base/drivers/block/ll_rw_blk.c	Thu Jul 19 20:51:23 2001
> +++ linux.mod/drivers/block/ll_rw_blk.c	Fri Aug 24 10:17:38 2001
> @@ -343,11 +343,13 @@
>  void generic_unplug_device(void *data)
>  {
>  	request_queue_t *q = (request_queue_t *) data;
> -	unsigned long flags;
> +	unsigned long flags[2];
>
> -	spin_lock_irqsave(&io_request_lock, flags);
> +	spin_lock_irqsave(&io_request_lock, flags[0]);
> +	spin_lock_irqsave(&q->queue_lock, flags[1]);
>  	__generic_unplug_device(q);
> -	spin_unlock_irqrestore(&io_request_lock, flags);
> +	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> +	spin_unlock_irqrestore(&io_request_lock, flags[0]);
>  }
>
>  static void blk_init_free_list(request_queue_t *q)
> @@ -470,9 +472,9 @@
>  	add_wait_queue_exclusive(&q->wait_for_request, &wait);
>  	for (;;) {
>  		__set_current_state(TASK_UNINTERRUPTIBLE);
> -		spin_lock_irq(&io_request_lock);
> +		spin_lock_irq(&q->queue_lock);
>  		rq = get_request(q, rw);
> -		spin_unlock_irq(&io_request_lock);
> +		spin_unlock_irq(&q->queue_lock);
>  		if (rq)
>  			break;
>  		generic_unplug_device(q);
> @@ -487,9 +489,9 @@
>  {
>  	register struct request *rq;
>
> -	spin_lock_irq(&io_request_lock);
> +	spin_lock_irq(&q->queue_lock);
>  	rq = get_request(q, rw);
> -	spin_unlock_irq(&io_request_lock);
> +	spin_unlock_irq(&q->queue_lock);
>  	if (rq)
>  		return rq;
>  	return __get_request_wait(q, rw);
> @@ -555,7 +557,7 @@
>  	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
>
>  	if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
> -		spin_unlock_irq(&io_request_lock);
> +		spin_unlock_irq(&q->queue_lock);
>  		BUG();
>  	}
>
> @@ -727,7 +729,7 @@
>  	 * Now we acquire the request spinlock, we have to be mega careful
>  	 * not to schedule or do something nonatomic
>  	 */
> -	spin_lock_irq(&io_request_lock);
> +	spin_lock_irq(&q->queue_lock);
>
>  	insert_here = head->prev;
>  	if (list_empty(head)) {
> @@ -794,7 +796,7 @@
>  		req = freereq;
>  		freereq = NULL;
>  	} else if ((req = get_request(q, rw)) == NULL) {
> -		spin_unlock_irq(&io_request_lock);
> +		spin_unlock_irq(&q->queue_lock);
>  		if (rw_ahead)
>  			goto end_io;
>
> @@ -821,7 +823,7 @@
>  out:
>  	if (freereq)
>  		blkdev_release_request(freereq);
> -	spin_unlock_irq(&io_request_lock);
> +	spin_unlock_irq(&q->queue_lock);
>  	return 0;
>  end_io:
>  	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
> diff -Naur linux.base/drivers/scsi/scsi.c linux.mod/drivers/scsi/scsi.c
> --- linux.base/drivers/scsi/scsi.c	Thu Jul 19 21:07:04 2001
> +++ linux.mod/drivers/scsi/scsi.c	Fri Aug 24 11:06:25 2001
> @@ -678,41 +678,33 @@
>  		 * passes a meaningful return value.
>  		 */
>  		if (host->hostt->use_new_eh_code) {
> -                        spin_lock_irqsave(&io_request_lock, flags);
>  			rtn = host->hostt->queuecommand(SCpnt, scsi_done);
> -                        spin_unlock_irqrestore(&io_request_lock, flags);
>  			if (rtn != 0) {
>  				scsi_delete_timer(SCpnt);
>  				scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
>                                  SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
>  			}
>  		} else {
> -                        spin_lock_irqsave(&io_request_lock, flags);
>  			host->hostt->queuecommand(SCpnt, scsi_old_done);
> -                        spin_unlock_irqrestore(&io_request_lock, flags);
>  		}
>  	} else {
>  		int temp;
>
>  		SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
> -                spin_lock_irqsave(&io_request_lock, flags);
>  		temp = host->hostt->command(SCpnt);
>  		SCpnt->result = temp;
>  #ifdef DEBUG_DELAY
> -                spin_unlock_irqrestore(&io_request_lock, flags);
>  		clock = jiffies + 4 * HZ;
>  		while (time_before(jiffies, clock))
>  			barrier();
>  		printk("done(host = %d, result = %04x) : routine at %p\n",
>  		       host->host_no, temp, host->hostt->command);
> -                spin_lock_irqsave(&io_request_lock, flags);
>  #endif
>  		if (host->hostt->use_new_eh_code) {
>  			scsi_done(SCpnt);
>  		} else {
>  			scsi_old_done(SCpnt);
>  		}
> -                spin_unlock_irqrestore(&io_request_lock, flags);
>  	}
>  	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
>  	return rtn;
> diff -Naur linux.base/drivers/scsi/scsi_lib.c linux.mod/drivers/scsi/scsi_lib.c
> --- linux.base/drivers/scsi/scsi_lib.c	Thu Jul 19 20:48:04 2001
> +++ linux.mod/drivers/scsi/scsi_lib.c	Fri Aug 24 10:19:50 2001
> @@ -68,7 +68,7 @@
>  static void __scsi_insert_special(request_queue_t *q, struct request *rq,
>  				  void *data, int at_head)
>  {
> -	unsigned long flags;
> +	unsigned long flags[2];
>
>  	ASSERT_LOCK(&io_request_lock, 0);
>
> @@ -84,7 +84,8 @@
>  	 * head of the queue for things like a QUEUE_FULL message from a
>  	 * device, or a host that is unable to accept a particular command.
>  	 */
> -	spin_lock_irqsave(&io_request_lock, flags);
> +	spin_lock_irqsave(&io_request_lock, flags[0]);
> +	spin_lock_irqsave(&q->queue_lock, flags[1]);
>
>  	if (at_head)
>  		list_add(&rq->queue, &q->queue_head);
> @@ -92,7 +93,8 @@
>  		list_add_tail(&rq->queue, &q->queue_head);
>
>  	q->request_fn(q);
> -	spin_unlock_irqrestore(&io_request_lock, flags);
> +	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> +	spin_unlock_irqrestore(&io_request_lock, flags[0]);
>  }
>
>
> @@ -246,13 +248,14 @@
>  void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
>  {
>  	int all_clear;
> -	unsigned long flags;
> +	unsigned long flags[2];
>  	Scsi_Device *SDpnt;
>  	struct Scsi_Host *SHpnt;
>
>  	ASSERT_LOCK(&io_request_lock, 0);
>
> -	spin_lock_irqsave(&io_request_lock, flags);
> +	spin_lock_irqsave(&io_request_lock, flags[0]);
> +	spin_lock_irqsave(&q->queue_lock, flags[1]);
>  	if (SCpnt != NULL) {
>
>  		/*
> @@ -328,7 +331,8 @@
>  			SHpnt->some_device_starved = 0;
>  		}
>  	}
> -	spin_unlock_irqrestore(&io_request_lock, flags);
> +	spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> +	spin_unlock_irqrestore(&io_request_lock, flags[0]);
>  }
>
>  /*
> @@ -821,6 +825,7 @@
>  	struct Scsi_Device_Template *STpnt;
>
>  	ASSERT_LOCK(&io_request_lock, 1);
> +	ASSERT_LOCK(&q->queue_lock, 1);
>
>  	SDpnt = (Scsi_Device *) q->queuedata;
>  	if (!SDpnt) {
> @@ -828,6 +833,7 @@
>  	}
>  	SHpnt = SDpnt->host;
>
> +
>  	/*
>  	 * To start with, we keep looping until the queue is empty, or until
>  	 * the host is no longer able to accept any more requests.
> @@ -839,7 +845,7 @@
>  		 * we need to check to see if the queue is plugged or not.
>  		 */
>  		if (SHpnt->in_recovery || q->plugged)
> -			return;
> +			break;
>
>  		/*
>  		 * If the device cannot accept another request, then quit.
> @@ -887,9 +893,11 @@
>  			 */
>  			SDpnt->was_reset = 0;
>  			if (SDpnt->removable && !in_interrupt()) {
> +				spin_unlock_irq(&q->queue_lock);
>  				spin_unlock_irq(&io_request_lock);
>  				scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
>  				spin_lock_irq(&io_request_lock);
> +				spin_lock_irq(&q->queue_lock);
>  				continue;
>  			}
>  		}
> @@ -998,8 +1006,6 @@
>  		 * another.
>  		 */
>  		req = NULL;
> -		spin_unlock_irq(&io_request_lock);
> -
>  		if (SCpnt->request.cmd != SPECIAL) {
>  			/*
>  			 * This will do a couple of things:
> @@ -1028,7 +1034,6 @@
>  				{
>  					panic("Should not have leftover blocks\n");
>  				}
> -				spin_lock_irq(&io_request_lock);
>  				SHpnt->host_busy--;
>  				SDpnt->device_busy--;
>  				continue;
> @@ -1044,7 +1049,6 @@
>  				{
>  					panic("Should not have leftover blocks\n");
>  				}
> -				spin_lock_irq(&io_request_lock);
>  				SHpnt->host_busy--;
>  				SDpnt->device_busy--;
>  				continue;
> @@ -1065,7 +1069,6 @@
>  		 * Now we need to grab the lock again.  We are about to mess
>  		 * with the request queue and try to find another command.
>  		 */
> -		spin_lock_irq(&io_request_lock);
>  	}
>  }
>
> -
> To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-30 21:32 ` Gérard Roudier
@ 2001-08-30 21:47   ` Eric Youngdale
  2001-08-30 23:07     ` Jonathan Lahr
  0 siblings, 1 reply; 14+ messages in thread
From: Eric Youngdale @ 2001-08-30 21:47 UTC (permalink / raw)
  To: Gérard Roudier, Jonathan Lahr; +Cc: linux-kernel, linux-scsi

    I am afraid I would have to agree with Gérard.  We were planning on
cleaning this mess up in the 2.5 kernel, and my inclination would be to
leave this alone until then.

-Eric

----- Original Message -----
From: "Gérard Roudier" <groudier@free.fr>
To: "Jonathan Lahr" <lahr@us.ibm.com>
Cc: <linux-kernel@vger.kernel.org>; <linux-scsi@vger.kernel.org>
Sent: Thursday, August 30, 2001 5:32 PM
Subject: Re: io_request_lock/queue_lock patch


>
> Here are my welcome comments. :)
>
> In my opinion, it would well be a miracle if your patch does not introduce
> new race conditions, at least for drivers that still use the old scsi done
> method.
>
>   Gérard.
>
> On Thu, 30 Aug 2001, Jonathan Lahr wrote:
>
> > Included below is a snapshot of a patch I am developing to reduce
> > io_request_lock contention in 2.4.
> >
> > This patch changes __make_request to take the device-specific queue_lock
> > instead of io_request_lock and changes generic_unplug_device to take
> > queue_lock in addition to io_request_lock to serialize access to the
> > request queue.  Also, locking in various functions has been modified
> > in accordance with this new scheme.
> >
> > I have done some testing of this with aic7xxx, qla2x00, and lpfc
adapters.
> >
> > To apply it to 2.4.6 or 2.4.7, from linux/ "patch -p1 < iorlv0_247".
> >
> > Comments and suggestions are welcome and invited.
> >
> > Jonathan
> >
> > ----- patch -----
> > diff -Naur linux.base/drivers/block/ll_rw_blk.c
linux.mod/drivers/block/ll_rw_blk.c
> > --- linux.base/drivers/block/ll_rw_blk.c Thu Jul 19 20:51:23 2001
> > +++ linux.mod/drivers/block/ll_rw_blk.c Fri Aug 24 10:17:38 2001
> > @@ -343,11 +343,13 @@
> >  void generic_unplug_device(void *data)
> >  {
> >  request_queue_t *q = (request_queue_t *) data;
> > - unsigned long flags;
> > + unsigned long flags[2];
> >
> > - spin_lock_irqsave(&io_request_lock, flags);
> > + spin_lock_irqsave(&io_request_lock, flags[0]);
> > + spin_lock_irqsave(&q->queue_lock, flags[1]);
> >  __generic_unplug_device(q);
> > - spin_unlock_irqrestore(&io_request_lock, flags);
> > + spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> > + spin_unlock_irqrestore(&io_request_lock, flags[0]);
> >  }
> >
> >  static void blk_init_free_list(request_queue_t *q)
> > @@ -470,9 +472,9 @@
> >  add_wait_queue_exclusive(&q->wait_for_request, &wait);
> >  for (;;) {
> >  __set_current_state(TASK_UNINTERRUPTIBLE);
> > - spin_lock_irq(&io_request_lock);
> > + spin_lock_irq(&q->queue_lock);
> >  rq = get_request(q, rw);
> > - spin_unlock_irq(&io_request_lock);
> > + spin_unlock_irq(&q->queue_lock);
> >  if (rq)
> >  break;
> >  generic_unplug_device(q);
> > @@ -487,9 +489,9 @@
> >  {
> >  register struct request *rq;
> >
> > - spin_lock_irq(&io_request_lock);
> > + spin_lock_irq(&q->queue_lock);
> >  rq = get_request(q, rw);
> > - spin_unlock_irq(&io_request_lock);
> > + spin_unlock_irq(&q->queue_lock);
> >  if (rq)
> >  return rq;
> >  return __get_request_wait(q, rw);
> > @@ -555,7 +557,7 @@
> >  drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
> >
> >  if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
> > - spin_unlock_irq(&io_request_lock);
> > + spin_unlock_irq(&q->queue_lock);
> >  BUG();
> >  }
> >
> > @@ -727,7 +729,7 @@
> >  * Now we acquire the request spinlock, we have to be mega careful
> >  * not to schedule or do something nonatomic
> >  */
> > - spin_lock_irq(&io_request_lock);
> > + spin_lock_irq(&q->queue_lock);
> >
> >  insert_here = head->prev;
> >  if (list_empty(head)) {
> > @@ -794,7 +796,7 @@
> >  req = freereq;
> >  freereq = NULL;
> >  } else if ((req = get_request(q, rw)) == NULL) {
> > - spin_unlock_irq(&io_request_lock);
> > + spin_unlock_irq(&q->queue_lock);
> >  if (rw_ahead)
> >  goto end_io;
> >
> > @@ -821,7 +823,7 @@
> >  out:
> >  if (freereq)
> >  blkdev_release_request(freereq);
> > - spin_unlock_irq(&io_request_lock);
> > + spin_unlock_irq(&q->queue_lock);
> >  return 0;
> >  end_io:
> >  bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
> > diff -Naur linux.base/drivers/scsi/scsi.c linux.mod/drivers/scsi/scsi.c
> > --- linux.base/drivers/scsi/scsi.c Thu Jul 19 21:07:04 2001
> > +++ linux.mod/drivers/scsi/scsi.c Fri Aug 24 11:06:25 2001
> > @@ -678,41 +678,33 @@
> >  * passes a meaningful return value.
> >  */
> >  if (host->hostt->use_new_eh_code) {
> > -                        spin_lock_irqsave(&io_request_lock, flags);
> >  rtn = host->hostt->queuecommand(SCpnt, scsi_done);
> > -                        spin_unlock_irqrestore(&io_request_lock,
flags);
> >  if (rtn != 0) {
> >  scsi_delete_timer(SCpnt);
> >  scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
> >                                  SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
> >  }
> >  } else {
> > -                        spin_lock_irqsave(&io_request_lock, flags);
> >  host->hostt->queuecommand(SCpnt, scsi_old_done);
> > -                        spin_unlock_irqrestore(&io_request_lock,
flags);
> >  }
> >  } else {
> >  int temp;
> >
> >  SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n",
host->hostt->command));
> > -                spin_lock_irqsave(&io_request_lock, flags);
> >  temp = host->hostt->command(SCpnt);
> >  SCpnt->result = temp;
> >  #ifdef DEBUG_DELAY
> > -                spin_unlock_irqrestore(&io_request_lock, flags);
> >  clock = jiffies + 4 * HZ;
> >  while (time_before(jiffies, clock))
> >  barrier();
> >  printk("done(host = %d, result = %04x) : routine at %p\n",
> >         host->host_no, temp, host->hostt->command);
> > -                spin_lock_irqsave(&io_request_lock, flags);
> >  #endif
> >  if (host->hostt->use_new_eh_code) {
> >  scsi_done(SCpnt);
> >  } else {
> >  scsi_old_done(SCpnt);
> >  }
> > -                spin_unlock_irqrestore(&io_request_lock, flags);
> >  }
> >  SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
> >  return rtn;
> > diff -Naur linux.base/drivers/scsi/scsi_lib.c
linux.mod/drivers/scsi/scsi_lib.c
> > --- linux.base/drivers/scsi/scsi_lib.c Thu Jul 19 20:48:04 2001
> > +++ linux.mod/drivers/scsi/scsi_lib.c Fri Aug 24 10:19:50 2001
> > @@ -68,7 +68,7 @@
> >  static void __scsi_insert_special(request_queue_t *q, struct request
*rq,
> >    void *data, int at_head)
> >  {
> > - unsigned long flags;
> > + unsigned long flags[2];
> >
> >  ASSERT_LOCK(&io_request_lock, 0);
> >
> > @@ -84,7 +84,8 @@
> >  * head of the queue for things like a QUEUE_FULL message from a
> >  * device, or a host that is unable to accept a particular command.
> >  */
> > - spin_lock_irqsave(&io_request_lock, flags);
> > + spin_lock_irqsave(&io_request_lock, flags[0]);
> > + spin_lock_irqsave(&q->queue_lock, flags[1]);
> >
> >  if (at_head)
> >  list_add(&rq->queue, &q->queue_head);
> > @@ -92,7 +93,8 @@
> >  list_add_tail(&rq->queue, &q->queue_head);
> >
> >  q->request_fn(q);
> > - spin_unlock_irqrestore(&io_request_lock, flags);
> > + spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> > + spin_unlock_irqrestore(&io_request_lock, flags[0]);
> >  }
> >
> >
> > @@ -246,13 +248,14 @@
> >  void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
> >  {
> >  int all_clear;
> > - unsigned long flags;
> > + unsigned long flags[2];
> >  Scsi_Device *SDpnt;
> >  struct Scsi_Host *SHpnt;
> >
> >  ASSERT_LOCK(&io_request_lock, 0);
> >
> > - spin_lock_irqsave(&io_request_lock, flags);
> > + spin_lock_irqsave(&io_request_lock, flags[0]);
> > + spin_lock_irqsave(&q->queue_lock, flags[1]);
> >  if (SCpnt != NULL) {
> >
> >  /*
> > @@ -328,7 +331,8 @@
> >  SHpnt->some_device_starved = 0;
> >  }
> >  }
> > - spin_unlock_irqrestore(&io_request_lock, flags);
> > + spin_unlock_irqrestore(&q->queue_lock, flags[1]);
> > + spin_unlock_irqrestore(&io_request_lock, flags[0]);
> >  }
> >
> >  /*
> > @@ -821,6 +825,7 @@
> >  struct Scsi_Device_Template *STpnt;
> >
> >  ASSERT_LOCK(&io_request_lock, 1);
> > + ASSERT_LOCK(&q->queue_lock, 1);
> >
> >  SDpnt = (Scsi_Device *) q->queuedata;
> >  if (!SDpnt) {
> > @@ -828,6 +833,7 @@
> >  }
> >  SHpnt = SDpnt->host;
> >
> > +
> >  /*
> >  * To start with, we keep looping until the queue is empty, or until
> >  * the host is no longer able to accept any more requests.
> > @@ -839,7 +845,7 @@
> >  * we need to check to see if the queue is plugged or not.
> >  */
> >  if (SHpnt->in_recovery || q->plugged)
> > - return;
> > + break;
> >
> >  /*
> >  * If the device cannot accept another request, then quit.
> > @@ -887,9 +893,11 @@
> >  */
> >  SDpnt->was_reset = 0;
> >  if (SDpnt->removable && !in_interrupt()) {
> > + spin_unlock_irq(&q->queue_lock);
> >  spin_unlock_irq(&io_request_lock);
> >  scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
> >  spin_lock_irq(&io_request_lock);
> > + spin_lock_irq(&q->queue_lock);
> >  continue;
> >  }
> >  }
> > @@ -998,8 +1006,6 @@
> >  * another.
> >  */
> >  req = NULL;
> > - spin_unlock_irq(&io_request_lock);
> > -
> >  if (SCpnt->request.cmd != SPECIAL) {
> >  /*
> >  * This will do a couple of things:
> > @@ -1028,7 +1034,6 @@
> >  {
> >  panic("Should not have leftover blocks\n");
> >  }
> > - spin_lock_irq(&io_request_lock);
> >  SHpnt->host_busy--;
> >  SDpnt->device_busy--;
> >  continue;
> > @@ -1044,7 +1049,6 @@
> >  {
> >  panic("Should not have leftover blocks\n");
> >  }
> > - spin_lock_irq(&io_request_lock);
> >  SHpnt->host_busy--;
> >  SDpnt->device_busy--;
> >  continue;
> > @@ -1065,7 +1069,6 @@
> >  * Now we need to grab the lock again.  We are about to mess
> >  * with the request queue and try to find another command.
> >  */
> > - spin_lock_irq(&io_request_lock);
> >  }
> >  }
> >
> > -
> > To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html
>
> -
> To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-30 21:47   ` Eric Youngdale
@ 2001-08-30 23:07     ` Jonathan Lahr
  2001-08-31  5:57       ` Jens Axboe
  0 siblings, 1 reply; 14+ messages in thread
From: Jonathan Lahr @ 2001-08-30 23:07 UTC (permalink / raw)
  To: Eric Youngdale; +Cc: Gérard Roudier, linux-kernel, linux-scsi


Is there an estimate on when the 2.5 i/o subsystem will be released?

Jonathan

Eric Youngdale [eric@andante.org] wrote:
>     I am afraid I would have to agree with Gérard.  We were planning on
> cleaning this mess up in the 2.5 kernel, and my inclination would be to
> leave this alone until then.
> 
> -Eric
> 
> ----- Original Message -----
> From: "Gérard Roudier" <groudier@free.fr>
> To: "Jonathan Lahr" <lahr@us.ibm.com>
> Cc: <linux-kernel@vger.kernel.org>; <linux-scsi@vger.kernel.org>
> Sent: Thursday, August 30, 2001 5:32 PM
> Subject: Re: io_request_lock/queue_lock patch
> 
> 
> >
> > Here are my welcome comments. :)
> >
> > In my opinion, it would well be a miracle if your patch does not introduce
> > new race conditions, at least for drivers that still use the old scsi done
> > method.
> >
> >   Gérard.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-30 20:49 io_request_lock/queue_lock patch Jonathan Lahr
  2001-08-30 21:32 ` Gérard Roudier
@ 2001-08-31  5:56 ` Jens Axboe
  2001-08-31 14:52   ` Jonathan Lahr
  1 sibling, 1 reply; 14+ messages in thread
From: Jens Axboe @ 2001-08-31  5:56 UTC (permalink / raw)
  To: Jonathan Lahr; +Cc: linux-kernel, linux-scsi

On Thu, Aug 30 2001, Jonathan Lahr wrote:
> 
> Included below is a snapshot of a patch I am developing to reduce 
> io_request_lock contention in 2.4.  

No no no, you are opening a serious can of worms. No offense, but did
you really think this would fly?! This is already being taken care of
for 2.5, let's leave 2.4 alone in this regard.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-30 23:07     ` Jonathan Lahr
@ 2001-08-31  5:57       ` Jens Axboe
  0 siblings, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2001-08-31  5:57 UTC (permalink / raw)
  To: Jonathan Lahr
  Cc: Eric Youngdale, Gérard Roudier, linux-kernel, linux-scsi

On Thu, Aug 30 2001, Jonathan Lahr wrote:
> 
> Is there an estimate on when the 2.5 i/o subsystem will be released?

I've at least put up incremental patches of what I'm doing here:

kernel.org/pub/linux/kernel/people/axboe/v2.5

bio-14 will be final next week.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-31  5:56 ` Jens Axboe
@ 2001-08-31 14:52   ` Jonathan Lahr
  2001-08-31 18:03     ` Jens Axboe
  0 siblings, 1 reply; 14+ messages in thread
From: Jonathan Lahr @ 2001-08-31 14:52 UTC (permalink / raw)
  To: Jens Axboe; +Cc: lahr, linux-kernel, linux-scsi


Jens,

Please elaborate on "no, no, no".   Are you suggesting that no further
improvements can be made or should be attempted on the 2.4 i/o subsystem?

Jonathan

Jens Axboe [axboe@suse.de] wrote:
> On Thu, Aug 30 2001, Jonathan Lahr wrote:
> > 
> > Included below is a snapshot of a patch I am developing to reduce 
> > io_request_lock contention in 2.4.  
> 
> No no no, you are opening a serious can of worms. No offense, but did
> you really think this would fly?! This is already being taken care of
> for 2.5, lets leave 2.4 alone in this regard.
> 
> -- 
> Jens Axboe

-- 
Jonathan Lahr
IBM Linux Technology Center
Beaverton, Oregon
lahr@us.ibm.com
503-578-3385


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-31 14:52   ` Jonathan Lahr
@ 2001-08-31 18:03     ` Jens Axboe
  2001-08-31 18:33       ` Jonathan Lahr
  0 siblings, 1 reply; 14+ messages in thread
From: Jens Axboe @ 2001-08-31 18:03 UTC (permalink / raw)
  To: Jonathan Lahr; +Cc: lahr, linux-kernel, linux-scsi

On Fri, Aug 31 2001, Jonathan Lahr wrote:
> 
> Jens,
> 
> Please elaborate on "no, no, no".   Are you suggesting that no further
> improvements can be made or should be attempted on the 2.4 i/o subsystem?

Of course not. The no no no just means that attempting to globally remove the
io_request_lock at this point is a no-go, so don't even go there. The
sledgehammer approach will not fly at this point, it's just way too risky.

Jens

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-31 18:03     ` Jens Axboe
@ 2001-08-31 18:33       ` Jonathan Lahr
  2001-09-03  7:07         ` Jens Axboe
  0 siblings, 1 reply; 14+ messages in thread
From: Jonathan Lahr @ 2001-08-31 18:33 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, linux-scsi


> > Please elaborate on "no, no, no".   Are you suggesting that no further
> > improvements can be made or should be attempted on the 2.4 i/o subsystem?
> 
> Of course not. The no no no just means that attempting to globally remove the
> io_request_lock at this point is a no-go, so don't even go there. The
> sledgehammer approach will not fly at this point, it's just way too risky.

I agree that reducing locking scope is often problematic.  However,
this patch does not globally remove the io_request_lock.  The purpose
of the patch is to protect request queue integrity with a per queue 
lock instead of the global io_request_lock.  My intent was to leave 
other io_request_lock serialization intact.  Any insight into whether
the patch leaves data unprotected would be appreciated.

Jonathan

-- 
Jonathan Lahr
IBM Linux Technology Center
Beaverton, Oregon
lahr@us.ibm.com
503-578-3385


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-08-31 18:33       ` Jonathan Lahr
@ 2001-09-03  7:07         ` Jens Axboe
  2001-09-04 16:46           ` Jonathan Lahr
  2001-09-05 20:30           ` Peter Rival
  0 siblings, 2 replies; 14+ messages in thread
From: Jens Axboe @ 2001-09-03  7:07 UTC (permalink / raw)
  To: Jonathan Lahr; +Cc: linux-kernel, linux-scsi

On Fri, Aug 31 2001, Jonathan Lahr wrote:
> 
> > > Please elaborate on "no, no, no".   Are you suggesting that no further
> > > improvements can be made or should be attempted on the 2.4 i/o subsystem?
> > 
> > Of course not. The no no no just means that attempting to globally remove the
> > io_request_lock at this point is a no-go, so don't even go there. The
> > sledgehammer approach will not fly at this point, it's just way too risky.
> 
> I agree that reducing locking scope is often problematic.  However,
> this patch does not globally remove the io_request_lock.  The purpose
> of the patch is to protect request queue integrity with a per queue 
> lock instead of the global io_request_lock.  My intent was to leave 
> other io_request_lock serialization intact.  Any insight into whether
> the patch leaves data unprotected would be appreciated.

You are now browsing the request list without agreeing on what lock is
being held -- what happens to drivers assuming that io_request_lock
protects the list? Boom. For 2.4 we simply cannot afford to muck around
with this, it's just too dangerous. For 2.5 I already completely removed
the io_request_lock (also helps to catch references to it from drivers).

I agree with your SCSI approach, it's the same we took. Low level
drivers must be responsible for their own locking, the mid layer should
not pre-grab anything for them. 

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-09-03  7:07         ` Jens Axboe
@ 2001-09-04 16:46           ` Jonathan Lahr
  2001-09-04 17:17             ` Jens Axboe
  2001-09-05 20:30           ` Peter Rival
  1 sibling, 1 reply; 14+ messages in thread
From: Jonathan Lahr @ 2001-09-04 16:46 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, linux-scsi


> You are now browsing the request list without agreeing on what lock is
> being held -- what happens to drivers assuming that io_request_lock
> protects the list? Boom. For 2.4 we simply cannot afford to muck around
> with this, it's just too dangerous. For 2.5 I already completely removed
> the io_request_lock (also helps to catch references to it from drivers).

In this patch, io_request_lock and queue_lock are both acquired in  
generic_unplug_device, so request_fn invocations protect request queue 
integrity.  __make_request acquires queue_lock instead of io_request_lock 
thus protecting queue integrity while allowing greater concurrency.

Nevertheless, I understand your unwillingness to change locking as 
pervasive as io_request_lock.  Such changes would of course involve 
risk.  I am simply trying to improve 2.4 i/o performance, since 2.4
could have a long time left to live.  

> I agree with your SCSI approach, it's the same we took. Low level
> drivers must be responsible for their own locking, the mid layer should
> not pre-grab anything for them. 

Yes, calling out of subsystem scope with locks held can cause problems.

Thanks for your feedback.  

Jonathan 

-- 
Jonathan Lahr
IBM Linux Technology Center
Beaverton, Oregon
lahr@us.ibm.com
503-578-3385


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-09-04 16:46           ` Jonathan Lahr
@ 2001-09-04 17:17             ` Jens Axboe
  0 siblings, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2001-09-04 17:17 UTC (permalink / raw)
  To: Jonathan Lahr; +Cc: linux-kernel, linux-scsi

On Tue, Sep 04 2001, Jonathan Lahr wrote:
> 
> > You are now browsing the request list without agreeing on what lock
> > is
> > being held -- what happens to drivers assuming that io_request_lock
> > protects the list? Boom. For 2.4 we simply cannot afford to muck
> > around
> > with this, it's just too dangerous. For 2.5 I already completely
> > removed
> > the io_request_lock (also helps to catch references to it from
> > drivers).
> 
> In this patch, io_request_lock and queue_lock are both acquired in  
> generic_unplug_device, so request_fn invocations protect request queue 
> integrity.  __make_request acquires queue_lock instead of
> io_request_lock 
> thus protecting queue integrity while allowing greater concurrency.

You fixed SCSI for q->queue_head usage, that part looks ok. The low
level call backs are a much bigger mess though. And you broke IDE,
cciss, cpqarray, DAC960, etc etc in the process.

> Nevertheless, I understand your unwillingness to change locking as 
> pervasive as io_request_lock.  Such changes would of course involve 
> risk.  I am simply trying to improve 2.4 i/o performance, since 2.4
> could have a long time left to live.  

I can certainly understand that, but I really hope you see what I mean
that we cannot change this locking now.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-09-03  7:07         ` Jens Axboe
  2001-09-04 16:46           ` Jonathan Lahr
@ 2001-09-05 20:30           ` Peter Rival
  2001-09-06  6:03             ` Jens Axboe
  1 sibling, 1 reply; 14+ messages in thread
From: Peter Rival @ 2001-09-05 20:30 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Jonathan Lahr, linux-kernel, linux-scsi

Jens Axboe wrote:
> You are now browsing the request list without agreeing on what lock is
> being held -- what happens to drivers assuming that io_request_lock
> protects the list? Boom. For 2.4 we simply cannot afford to muck around
> with this, it's just too dangerous. For 2.5 I already completely removed
> the io_request_lock (also helps to catch references to it from drivers).

Is this part of the bio patches?  (I confess, I haven't had the time to 
look yet.)  If not, do you know when we'll be seeing sneak previews of 
this code?  (Yes, it's me again! ;)

  - Pete


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: io_request_lock/queue_lock patch
  2001-09-05 20:30           ` Peter Rival
@ 2001-09-06  6:03             ` Jens Axboe
  0 siblings, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2001-09-06  6:03 UTC (permalink / raw)
  To: Peter Rival; +Cc: Jonathan Lahr, linux-kernel, linux-scsi

On Wed, Sep 05 2001, Peter Rival wrote:
> Jens Axboe wrote:
> >You are now browsing the request list without agreeing on what lock is
> >being held -- what happens to drivers assuming that io_request_lock
> >protects the list? Boom. For 2.4 we simply cannot afford to muck around
> >with this, it's just too dangerous. For 2.5 I already completely removed
> >the io_request_lock (also helps to catch references to it from drivers).
> 
> Is this part of the bio patches?  (I confess, I haven't had the time to 
> look yet.)  If not, do you know when we'll be seeing sneak previews of 
> this code?  (Yes, it's me again! ;)

Yep it's part of the bio patch. bio-14 for 2.4.10-preX is pending today
or tomorrow.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2001-09-06  6:03 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2001-08-30 20:49 io_request_lock/queue_lock patch Jonathan Lahr
2001-08-30 21:32 ` Gérard Roudier
2001-08-30 21:47   ` Eric Youngdale
2001-08-30 23:07     ` Jonathan Lahr
2001-08-31  5:57       ` Jens Axboe
2001-08-31  5:56 ` Jens Axboe
2001-08-31 14:52   ` Jonathan Lahr
2001-08-31 18:03     ` Jens Axboe
2001-08-31 18:33       ` Jonathan Lahr
2001-09-03  7:07         ` Jens Axboe
2001-09-04 16:46           ` Jonathan Lahr
2001-09-04 17:17             ` Jens Axboe
2001-09-05 20:30           ` Peter Rival
2001-09-06  6:03             ` Jens Axboe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).