linux-kernel.vger.kernel.org archive mirror
* cciss updates for 2.6.6xxx [1/2]
@ 2004-04-06 20:10 mikem
  2004-04-07 16:14 ` Jeff Garzik
  0 siblings, 1 reply; 6+ messages in thread
From: mikem @ 2004-04-06 20:10 UTC (permalink / raw)
  To: akpm, axboe; +Cc: linux-kernel

This patch adds per-logical-device queues to the HP cciss driver. It currently implements only a single per-controller lock; when time permits I will add per-queue locking. Thanks to Jeff Garzik for providing some sample code.
This patch was built against 2.6.5. Please consider it for inclusion.

mikem
-------------------------------------------------------------------------------

diff -burpN lx265.orig/drivers/block/cciss.c lx265.save/drivers/block/cciss.c
--- lx265.orig/drivers/block/cciss.c	2004-04-03 21:38:20.000000000 -0600
+++ lx265.save/drivers/block/cciss.c	2004-04-06 10:22:08.000000000 -0500
@@ -988,7 +988,7 @@ static int revalidate_allvol(ctlr_info_t
 		drive_info_struct *drv = &(host->drv[i]);
 		if (!drv->nr_blocks)
 			continue;
-		blk_queue_hardsect_size(host->queue, drv->block_size);
+		blk_queue_hardsect_size(drv->queue, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
@@ -2013,7 +2013,7 @@ static irqreturn_t do_cciss_intr(int irq
 	CommandList_struct *c;
 	unsigned long flags;
 	__u32 a, a1;
-
+	int j;
 
 	/* Is this interrupt for us? */
 	if (( h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
@@ -2059,11 +2059,18 @@ static irqreturn_t do_cciss_intr(int irq
 			}
 		}
 	}
-
 	/*
 	 * See if we can queue up some more IO
+	 * check every disk that exists on this controller
+	 * and start its IO
 	 */
-	blk_start_queue(h->queue);
+	for (j = 0; j < NWD; j++) {
+		/* make sure the disk has been added and the drive is real */
+		/* because this can be called from the middle of init_one */
+		if (!(h->gendisk[j]->queue) || !(h->drv[j].nr_blocks))
+			continue;
+		blk_start_queue(h->gendisk[j]->queue);
+	}
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
@@ -2510,7 +2517,6 @@ static void free_hba(int i)
 static int __devinit cciss_init_one(struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
-	request_queue_t *q;
 	int i;
 	int j;
 
@@ -2568,13 +2574,6 @@ static int __devinit cciss_init_one(stru
 	}
 
 	spin_lock_init(&hba[i]->lock);
-	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
-	if (!q)
-		goto clean4;
-
-	q->backing_dev_info.ra_pages = READ_AHEAD;
-	hba[i]->queue = q;
-	q->queuedata = hba[i];
 
 	/* Initialize the pdev driver private data. 
 		have it point to hba[i].  */
@@ -2596,6 +2595,19 @@ static int __devinit cciss_init_one(stru
 
 	cciss_procinit(i);
 
+	for (j = 0; j < NWD; j++) {
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		struct gendisk *disk = hba[i]->gendisk[j];
+		request_queue_t *q;
+
+		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+		if (!q) {
+			printk(KERN_ERR 
+			   "cciss:  unable to allocate queue for disk %d\n",
+			   j);
+			break;
+		}
+		drv->queue = q;
 	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
@@ -2606,21 +2618,17 @@ static int __devinit cciss_init_one(stru
 
 	blk_queue_max_sectors(q, 512);
 
-
-	for(j=0; j<NWD; j++) {
-		drive_info_struct *drv = &(hba[i]->drv[j]);
-		struct gendisk *disk = hba[i]->gendisk[j];
-
+		q->queuedata = hba[i];
 		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
 		sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
 		disk->major = COMPAQ_CISS_MAJOR + i;
 		disk->first_minor = j << NWD_SHIFT;
 		disk->fops = &cciss_fops;
-		disk->queue = hba[i]->queue;
+		disk->queue = q;
 		disk->private_data = drv;
 		if( !(drv->nr_blocks))
 			continue;
-		blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
+		blk_queue_hardsect_size(q, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
@@ -2690,9 +2698,10 @@ static void __devexit cciss_remove_one (
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk->flags & GENHD_FL_UP)
 			del_gendisk(disk);
+		if (disk->queue)
+			blk_cleanup_queue(disk->queue);
 	}
 
-	blk_cleanup_queue(hba[i]->queue);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
 			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
diff -burpN lx265.orig/drivers/block/cciss.h lx265.save/drivers/block/cciss.h
--- lx265.orig/drivers/block/cciss.h	2004-04-03 21:36:13.000000000 -0600
+++ lx265.save/drivers/block/cciss.h	2004-04-06 10:22:08.000000000 -0500
@@ -27,6 +27,7 @@ typedef struct _drive_info_struct
 {
  	__u32   LunID;	
 	int 	usage_count;
+	struct request_queue *queue;
 	sector_t nr_blocks;
 	int	block_size;
 	int 	heads;
@@ -69,7 +70,6 @@ struct ctlr_info 
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
 	spinlock_t lock;
-	struct request_queue *queue;
 
 	//* pointers to command and error info pool */ 
 	CommandList_struct 	*cmd_pool;
@@ -252,7 +252,7 @@ struct board_type {
 	struct access_method *access;
 };
 
-#define CCISS_LOCK(i)	(hba[i]->queue->queue_lock)
+#define CCISS_LOCK(i)	(&(hba[i]->lock))
 
 #endif /* CCISS_H */
 

* Re: cciss updates for 2.6.6xxx [1/2]
  2004-04-06 20:10 cciss updates for 2.6.6xxx [1/2] mikem
@ 2004-04-07 16:14 ` Jeff Garzik
  0 siblings, 0 replies; 6+ messages in thread
From: Jeff Garzik @ 2004-04-07 16:14 UTC (permalink / raw)
  To: mike.miller; +Cc: akpm, axboe, linux-kernel

mikem@beardog.cca.cpqcorp.net wrote:
> This patch adds per-logical-device queues to the HP cciss driver. It currently implements only a single per-controller lock; when time permits I will add per-queue locking. Thanks to Jeff Garzik for providing some sample code.
> This patch was built against 2.6.5. Please consider it for inclusion.


I appreciate the credit but I don't see that it addressed my original 
objection -- the starvation issue.

Do you cap the number of per-array requests at "1024 / n_arrays", or
something like that?  You mentioned that the hardware has a maximum of
1024 outstanding commands for all devices.  The two typical solutions
are a round-robin queue (see carmel.c) or limiting each array such that 
if all arrays are full of commands, the total outstanding never exceeds 
1024.
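
A minimal sketch of the capping idea, purely for illustration -- the
max_outstanding/outstanding fields and CCISS_TOTAL_CMDS are assumptions,
not existing cciss code:

	#define CCISS_TOTAL_CMDS	1024	/* controller-wide budget */

	/* At init time, give each logical drive an equal share of the
	 * controller's command slots. */
	static void cciss_divide_cmd_slots(ctlr_info_t *h)
	{
		int j;

		for (j = 0; j < NWD; j++)
			h->drv[j].max_outstanding = CCISS_TOTAL_CMDS / NWD;
	}

	/* In do_cciss_request(): plug this drive's queue once it has
	 * used its share; restart it from the completion path. */
	if (drv->outstanding >= drv->max_outstanding) {
		blk_stop_queue(q);
		return;
	}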

This patch may be OK for -mm, but I would rather not see it go upstream --
it seems to me you are choosing to decrease stability to obtain a
performance increase.  I think you can increase performance without
decreasing stability.

	Jeff

* Re: cciss updates for 2.6.6xxx [1/2]
  2004-04-07 19:11 Miller, Mike (OS Dev)
@ 2004-04-07 19:42 ` Jeff Garzik
  0 siblings, 0 replies; 6+ messages in thread
From: Jeff Garzik @ 2004-04-07 19:42 UTC (permalink / raw)
  To: Miller, Mike (OS Dev); +Cc: akpm, axboe, linux-kernel

Miller, Mike (OS Dev) wrote:
> I like the idea of capping max commands based on the number of arrays. One problem is that we can add or remove a logical drive during runtime. How would Linux handle us reshuffling the max commands for each queue?


What is the maximum number of logical drives?

If you always assume the maximum number of logical drives is configured
(let's say 32), then you can calculate based on that:
	1024 / 32 logical drives == 32 commands per logical drive

It certainly gets a bit more complex if you truly want to handle 
something other than "hardcode max # of logical drives"...  I would 
suggest going to a round-robin anti-starvation approach at that point, 
like what I do in carmel.c.  The Carmel hardware has a single hardware 
queue for all of its 8 SATA ports, which is similar to cciss having 1024 
commands for all of its logical drives (if I understand things correctly).
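
As a sketch of the round-robin alternative (cciss_round_robin() and the
next_drv cursor are hypothetical, and building the controller command
is elided):

	/* Take at most one request from each logical drive per pass,
	 * so no single drive can starve the others. */
	static void cciss_round_robin(ctlr_info_t *h)
	{
		int j;

		for (j = 0; j < NWD; j++) {
			int d = (h->next_drv + j) % NWD;
			request_queue_t *q = h->drv[d].queue;
			struct request *rq;

			if (!q || !h->drv[d].nr_blocks)
				continue;
			rq = elv_next_request(q);
			if (!rq)
				continue;
			blkdev_dequeue_request(rq);
			/* ... submit the command for rq to the board ... */
		}
		h->next_drv = (h->next_drv + 1) % NWD;
	}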

	Jeff

* RE: cciss updates for 2.6.6xxx [1/2]
@ 2004-04-07 19:11 Miller, Mike (OS Dev)
  2004-04-07 19:42 ` Jeff Garzik
  0 siblings, 1 reply; 6+ messages in thread
From: Miller, Mike (OS Dev) @ 2004-04-07 19:11 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: akpm, axboe, linux-kernel

I like the idea of capping max commands based on the number of arrays. One problem is that we can add or remove a logical drive during runtime. How would Linux handle us reshuffling the max commands for each queue?

mikem

-----Original Message-----
From: Jeff Garzik [mailto:jgarzik@pobox.com]
Sent: Wednesday, April 07, 2004 11:34 AM
To: Miller, Mike (OS Dev)
Cc: akpm@osdl.org; axboe@suse.de; linux-kernel@vger.kernel.org
Subject: Re: cciss updates for 2.6.6xxx [1/2]


Miller, Mike (OS Dev) wrote:
> Yep, you're right. I just regurgitated the same code. I'll pull my head out and try again :(


The easiest thing to do may be to take your patch #1, and then add code 
to clamp the per-queue outstanding-command (tag) depth to
	1024 / n_arrays_found

at initialization time.  Or perhaps s/n_arrays_found/max_arrays_per_hba/

I bet that's just a few additional lines of code, and should work...

Regards,

	Jeff

* Re: cciss updates for 2.6.6xxx [1/2]
  2004-04-07 16:21 Miller, Mike (OS Dev)
@ 2004-04-07 16:33 ` Jeff Garzik
  0 siblings, 0 replies; 6+ messages in thread
From: Jeff Garzik @ 2004-04-07 16:33 UTC (permalink / raw)
  To: Miller, Mike (OS Dev); +Cc: akpm, axboe, linux-kernel

Miller, Mike (OS Dev) wrote:
> Yep, you're right. I just regurgitated the same code. I'll pull my head out and try again :(


The easiest thing to do may be to take your patch #1, and then add code 
to clamp the per-queue outstanding-command (tag) depth to
	1024 / n_arrays_found

at initialization time.  Or perhaps s/n_arrays_found/max_arrays_per_hba/

I bet that's just a few additional lines of code, and should work...
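
As a rough sketch, the clamp could sit right where patch #1 allocates
each per-disk queue in cciss_init_one() -- using q->nr_requests as the
limiting knob and 1024 as the total are my assumptions:

	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
	if (!q)
		break;
	/* keep all NWD queues together within the controller's
	 * 1024-command budget */
	q->nr_requests = 1024 / NWD;	/* e.g. 1024 / 32 == 32 */
	drv->queue = q;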

Regards,

	Jeff

* RE: cciss updates for 2.6.6xxx [1/2]
@ 2004-04-07 16:21 Miller, Mike (OS Dev)
  2004-04-07 16:33 ` Jeff Garzik
  0 siblings, 1 reply; 6+ messages in thread
From: Miller, Mike (OS Dev) @ 2004-04-07 16:21 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: akpm, axboe, linux-kernel

Yep, you're right. I just regurgitated the same code. I'll pull my head out and try again :(

mikem

-----Original Message-----
From: Jeff Garzik [mailto:jgarzik@pobox.com]
Sent: Wednesday, April 07, 2004 11:15 AM
To: Miller, Mike (OS Dev)
Cc: akpm@osdl.org; axboe@suse.de; linux-kernel@vger.kernel.org
Subject: Re: cciss updates for 2.6.6xxx [1/2]


mikem@beardog.cca.cpqcorp.net wrote:
> This patch adds per-logical-device queues to the HP cciss driver. It currently implements only a single per-controller lock; when time permits I will add per-queue locking. Thanks to Jeff Garzik for providing some sample code.
> This patch was built against 2.6.5. Please consider it for inclusion.


I appreciate the credit but I don't see that it addressed my original 
objection -- the starvation issue.

Do you cap the number of per-array requests at "1024 / n_arrays", or
something like that?  You mentioned that the hardware has a maximum of
1024 outstanding commands for all devices.  The two typical solutions
are a round-robin queue (see carmel.c) or limiting each array such that 
if all arrays are full of commands, the total outstanding never exceeds 
1024.

This patch may be OK for -mm, but I would rather not see it go upstream --
it seems to me you are choosing to decrease stability to obtain a
performance increase.  I think you can increase performance without
decreasing stability.

	Jeff
