* [PATCH] deadline io scheduler
@ 2002-09-25 17:20 Jens Axboe
  2002-09-26  6:15 ` Andrew Morton
  2002-09-30  7:45 ` Pavel Machek
  0 siblings, 2 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-25 17:20 UTC (permalink / raw)
  To: Linux Kernel

[-- Attachment #1: Type: text/plain, Size: 1697 bytes --]

Hi,

Due to recent "problems" (well, the vm being just too damn good at keeping
disks busy these days), it's become even more apparent that our current
io scheduler just cannot cope with some workloads. Repeated starvation
of reads is the most important one. The Andrew Morton Interactive
Workload (AMIW) [1] rates the current kernel poorly, on my test machine
it completes in 1-2 minutes depending on your luck. 2.5.38-BK does a lot
better, but mainly because it's being extremely unfair. This deadline io
scheduler finishes the AMIW in anywhere from ~0.5 seconds to ~3-4
seconds, depending on the io load.

I'd like folks to give it a test spin. Make two kernels, a 2.5.38
pristine and a 2.5.38 with this patch applied. Now beat on each of them,
while listening to mp3's. Or read mails and change folders. Or anything
else that gives you a feel for the interactiveness of the machine. Then
report your findings. I'm interested in _anything_.

There are a few tunables, but I'd suggest trying the defaults first.
Then experiment with these two:

static int read_expire = HZ / 2;

This defines the read expire time, current default is 500ms.
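(HZ / 2 is half a second worth of jiffies regardless of the tick rate:
50 jiffies at HZ=100, 500 at HZ=1000.)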

static int writes_starved = 2;

This defines how many times reads can starve writes. 2 means that we can
do two rounds of reads for 1 write.

If you are curious how deadline-iosched works, search the lkml archives for
previous announcements. I might make a new one if there's any
interest in a big detailed analysis, since there have been some
changes since the last release.

[1] Flush lots of stuff to disk (I start a dbench xxx, or do a dd
if=/dev/zero of=test_file bs=64k), and then time a cat dir/*.c where
dir/ holds lots of source files.

-- 
Jens Axboe


[-- Attachment #2: deadline-iosched-11 --]
[-- Type: text/plain, Size: 15911 bytes --]

# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.607   -> 1.608  
#	include/linux/elevator.h	1.11    -> 1.12   
#	drivers/block/ll_rw_blk.c	1.109   -> 1.110  
#	drivers/block/Makefile	1.9     -> 1.10   
#	               (new)	        -> 1.1     drivers/block/deadline-iosched.c
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/09/25	axboe@burns.home.kernel.dk	1.608
# deadline io scheduler
# --------------------------------------------
#
diff -Nru a/drivers/block/Makefile b/drivers/block/Makefile
--- a/drivers/block/Makefile	Wed Sep 25 19:16:26 2002
+++ b/drivers/block/Makefile	Wed Sep 25 19:16:26 2002
@@ -9,9 +9,9 @@
 #
 
 export-objs	:= elevator.o ll_rw_blk.o loop.o genhd.o acsi.o \
-		   block_ioctl.o
+		   block_ioctl.o deadline-iosched.o
 
-obj-y	:= elevator.o ll_rw_blk.o blkpg.o genhd.o block_ioctl.o
+obj-y	:= elevator.o ll_rw_blk.o blkpg.o genhd.o block_ioctl.o deadline-iosched.o
 
 obj-$(CONFIG_MAC_FLOPPY)	+= swim3.o
 obj-$(CONFIG_BLK_DEV_FD)	+= floppy.o
diff -Nru a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/block/deadline-iosched.c	Wed Sep 25 19:16:26 2002
@@ -0,0 +1,557 @@
+/*
+ *  linux/drivers/block/deadline-iosched.c
+ *
+ *  Deadline i/o scheduler.
+ *
+ *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/blk.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+
+/*
+ * feel free to try other values :-). read_expire value is the timeout for
+ * reads, our goal is to start a request "around" the time when it expires.
+ * fifo_batch is how many steps along the sorted list we will take when the
+ * front fifo request expires.
+ */
+static int read_expire = HZ / 2;	/* 500ms start timeout */
+static int fifo_batch = 64;		/* 4 seeks, or 64 contig */
+static int seek_cost = 16;		/* seek is 16 times more expensive */
+
+/*
+ * how many times reads are allowed to starve writes
+ */
+static int writes_starved = 2;
+
+static const int deadline_hash_shift = 8;
+#define DL_HASH_BLOCK(sec)	((sec) >> 3)
+#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
+#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
+
+#define DL_INVALIDATE_HASH(dd)				\
+	do {						\
+		if (!++(dd)->hash_valid_count)		\
+			(dd)->hash_valid_count = 1;	\
+	} while (0)
+
+struct deadline_data {
+	/*
+	 * run time data
+	 */
+	struct list_head sort_list[2];	/* sorted listed */
+	struct list_head read_fifo;	/* fifo list */
+	struct list_head *dispatch;	/* driver dispatch queue */
+	struct list_head *hash;		/* request hash */
+	sector_t last_sector;		/* last sector sent to drive */
+	unsigned long hash_valid_count;	/* barrier hash count */
+	unsigned int starved;		/* writes starved */
+
+	/*
+	 * settings that change how the i/o scheduler behaves
+	 */
+	unsigned int fifo_batch;
+	unsigned long read_expire;
+	unsigned int seek_cost;
+	unsigned int writes_starved;
+};
+
+/*
+ * pre-request data.
+ */
+struct deadline_rq {
+	struct list_head fifo;
+	struct list_head hash;
+	unsigned long hash_valid_count;
+	struct request *request;
+	unsigned long expires;
+};
+
+static kmem_cache_t *drq_pool;
+
+#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
+
+/*
+ * rq hash
+ */
+static inline void __deadline_del_rq_hash(struct deadline_rq *drq)
+{
+	drq->hash_valid_count = 0;
+	list_del_init(&drq->hash);
+}
+
+#define ON_HASH(drq)	(drq)->hash_valid_count
+static inline void deadline_del_rq_hash(struct deadline_rq *drq)
+{
+	if (ON_HASH(drq))
+		__deadline_del_rq_hash(drq);
+}
+
+static inline void
+deadline_add_rq_hash(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	struct request *rq = drq->request;
+
+	BUG_ON(ON_HASH(drq));
+
+	drq->hash_valid_count = dd->hash_valid_count;
+	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq->sector +rq->nr_sectors)]);
+}
+
+#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
+static struct request *
+deadline_find_hash(struct deadline_data *dd, sector_t offset)
+{
+	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct list_head *entry, *next = hash_list->next;
+	struct deadline_rq *drq;
+	struct request *rq = NULL;
+
+	while ((entry = next) != hash_list) {
+		next = entry->next;
+		drq = list_entry_hash(entry);
+
+		BUG_ON(!drq->hash_valid_count);
+
+		if (!rq_mergeable(drq->request)
+		    || drq->hash_valid_count != dd->hash_valid_count) {
+			__deadline_del_rq_hash(drq);
+			continue;
+		}
+
+		if (drq->request->sector + drq->request->nr_sectors == offset) {
+			rq = drq->request;
+			break;
+		}
+	}
+
+	return rq;
+}
+
+static int
+deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+	const int data_dir = bio_data_dir(bio);
+	struct list_head *entry, *sort_list;
+	struct deadline_rq *drq;
+	struct request *__rq;
+	int ret = ELEVATOR_NO_MERGE;
+
+	/*
+	 * try last_merge to avoid going to hash
+	 */
+	ret = elv_try_last_merge(q, req, bio);
+	if (ret != ELEVATOR_NO_MERGE)
+		goto out;
+
+	/*
+	 * see if the merge hash can satisfy a back merge
+	 */
+	if ((__rq = deadline_find_hash(dd, bio->bi_sector))) {
+		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+		if (elv_rq_merge_ok(__rq, bio)) {
+			*req = __rq;
+			q->last_merge = &__rq->queuelist;
+			ret = ELEVATOR_BACK_MERGE;
+			goto out_ret;
+		}
+	}
+
+	entry = sort_list = &dd->sort_list[data_dir];
+	while ((entry = entry->prev) != sort_list) {
+		__rq = list_entry_rq(entry);
+		drq = RQ_DATA(__rq);
+
+		BUG_ON(__rq->flags & REQ_STARTED);
+
+		if (!(__rq->flags & REQ_CMD))
+			continue;
+
+		if (!*req && bio_rq_in_between(bio, __rq, sort_list))
+			*req = __rq;
+
+		if (__rq->flags & REQ_BARRIER)
+			break;
+
+		/*
+		 * checking for a front merge, hash will miss those
+		 */
+		if (__rq->sector - bio_sectors(bio) == bio->bi_sector) {
+			ret = elv_try_merge(__rq, bio);
+			if (ret != ELEVATOR_NO_MERGE) {
+				*req = __rq;
+				q->last_merge = &__rq->queuelist;
+				break;
+			}
+		}
+	}
+
+out:
+	if (ret != ELEVATOR_NO_MERGE) {
+		struct deadline_rq *drq = RQ_DATA(*req);
+
+		deadline_del_rq_hash(drq);
+		deadline_add_rq_hash(dd, drq);
+	}
+out_ret:
+	return ret;
+}
+
+static void
+deadline_merge_request(request_queue_t *q, struct request *req, struct request *next)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+	struct deadline_rq *drq = RQ_DATA(req);
+	struct deadline_rq *dnext = RQ_DATA(next);
+
+	BUG_ON(!drq);
+	BUG_ON(!dnext);
+
+	deadline_del_rq_hash(drq);
+	deadline_add_rq_hash(dd, drq);
+
+	/*
+	 * if dnext expires before drq, assign it's expire time to drq
+	 * and move into dnext position (dnext will be deleted) in fifo
+	 */
+	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
+		if (time_before(dnext->expires, drq->expires)) {
+			list_move(&drq->fifo, &dnext->fifo);
+			drq->expires = dnext->expires;
+		}
+	}
+}
+
+/*
+ * move request from sort list to dispatch queue. maybe remove from rq hash
+ * here too?
+ */
+static inline void
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
+{
+	struct deadline_rq *drq = RQ_DATA(rq);
+
+	list_move_tail(&rq->queuelist, dd->dispatch);
+	list_del_init(&drq->fifo);
+}
+
+/*
+ * move along sort list and move entries to dispatch queue, starting from rq
+ */
+static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
+{
+	struct list_head *sort_head = &dd->sort_list[rq_data_dir(rq)];
+	sector_t last_sec = dd->last_sector;
+	int batch_count = dd->fifo_batch;
+
+	do {
+		struct list_head *nxt = rq->queuelist.next;
+
+		/*
+		 * take it off the sort and fifo list, move
+		 * to dispatch queue
+		 */
+		deadline_move_to_dispatch(dd, rq);
+
+		if (rq->sector == last_sec)
+			batch_count--;
+		else
+			batch_count -= dd->seek_cost;
+
+		if (nxt == sort_head)
+			break;
+
+		last_sec = rq->sector + rq->nr_sectors;
+		rq = list_entry_rq(nxt);
+	} while (batch_count > 0);
+}
+
+/*
+ * returns 0 if there are no expired reads on the fifo, 1 otherwise
+ */
+#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)
+static inline int deadline_check_fifo(struct deadline_data *dd)
+{
+	struct deadline_rq *drq;
+
+	if (list_empty(&dd->read_fifo))
+		return 0;
+
+	drq = list_entry_fifo(dd->read_fifo.next);
+	if (time_before(jiffies, drq->expires))
+		return 0;
+
+	return 1;
+}
+
+static struct request *deadline_next_request(request_queue_t *q)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+	struct deadline_rq *drq;
+	struct list_head *nxt;
+	struct request *rq;
+	int writes;
+
+	/*
+	 * if still requests on the dispatch queue, just grab the first one
+	 */
+	if (!list_empty(&q->queue_head)) {
+dispatch:
+		rq = list_entry_rq(q->queue_head.next);
+		dd->last_sector = rq->sector + rq->nr_sectors;
+		return rq;
+	}
+
+	writes = !list_empty(&dd->sort_list[WRITE]);
+
+	/*
+	 * if we have expired entries on the fifo list, move some to dispatch
+	 */
+	if (deadline_check_fifo(dd)) {
+		if (writes && (dd->starved++ >= dd->writes_starved))
+			goto dispatch_writes;
+
+		nxt = dd->read_fifo.next;
+		drq = list_entry_fifo(nxt);
+		deadline_move_requests(dd, drq->request);
+		goto dispatch;
+	}
+
+	if (!list_empty(&dd->sort_list[READ])) {
+		if (writes && (dd->starved++ >= dd->writes_starved))
+			goto dispatch_writes;
+
+		nxt = dd->sort_list[READ].next;
+		deadline_move_requests(dd, list_entry_rq(nxt));
+		goto dispatch;
+	}
+
+	/*
+	 * either there are no reads expired or on sort list, or the reads
+	 * have starved writes for too long. dispatch some writes
+	 */
+	if (writes) {
+dispatch_writes:
+		nxt = dd->sort_list[WRITE].next;
+		deadline_move_requests(dd, list_entry_rq(nxt));
+		dd->starved = 0;
+		goto dispatch;
+	}
+
+	BUG_ON(!list_empty(&dd->sort_list[READ]));
+	BUG_ON(writes);
+	return NULL;
+}
+
+static void
+deadline_add_request(request_queue_t *q, struct request *rq, struct list_head *insert_here)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+	struct deadline_rq *drq = RQ_DATA(rq);
+	const int data_dir = rq_data_dir(rq);
+
+	/*
+	 * flush hash on barrier insert, as not to allow merges before a
+	 * barrier.
+	 */
+	if (unlikely(rq->flags & REQ_BARRIER)) {
+		DL_INVALIDATE_HASH(dd);
+		q->last_merge = NULL;
+	}
+
+	/*
+	 * add to sort list
+	 */
+	if (!insert_here)
+		insert_here = dd->sort_list[data_dir].prev;
+
+	list_add(&rq->queuelist, insert_here);
+
+	if (unlikely(!(rq->flags & REQ_CMD)))
+		return;
+
+	if (rq_mergeable(rq)) {
+		deadline_add_rq_hash(dd, drq);
+
+		if (!q->last_merge)
+			q->last_merge = &rq->queuelist;
+	}
+
+	if (data_dir == READ) {
+		/*
+		 * set expire time and add to fifo list
+		 */
+		drq->expires = jiffies + dd->read_expire;
+		list_add_tail(&drq->fifo, &dd->read_fifo);
+	}
+}
+
+static void deadline_remove_request(request_queue_t *q, struct request *rq)
+{
+	struct deadline_rq *drq = RQ_DATA(rq);
+
+	if (drq) {
+		list_del_init(&drq->fifo);
+		deadline_del_rq_hash(drq);
+	}
+}
+
+static int deadline_queue_empty(request_queue_t *q)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+
+	if (!list_empty(&q->queue_head) || !list_empty(&dd->sort_list[READ])
+	    || !list_empty(&dd->sort_list[WRITE]))
+		return 0;
+
+	BUG_ON(!list_empty(&dd->read_fifo));
+	return 1;
+}
+
+static struct list_head *
+deadline_get_sort_head(request_queue_t *q, struct request *rq)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+
+	return &dd->sort_list[rq_data_dir(rq)];
+}
+
+static void deadline_exit(request_queue_t *q, elevator_t *e)
+{
+	struct deadline_data *dd = e->elevator_data;
+	struct deadline_rq *drq;
+	struct request *rq;
+	int i;
+
+	BUG_ON(!list_empty(&dd->read_fifo));
+	BUG_ON(!list_empty(&dd->sort_list[READ]));
+	BUG_ON(!list_empty(&dd->sort_list[WRITE]));
+
+	for (i = READ; i <= WRITE; i++) {
+		struct request_list *rl = &q->rq[i];
+		struct list_head *entry = &rl->free;
+
+		if (list_empty(&rl->free))
+			continue;
+	
+		while ((entry = entry->next) != &rl->free) {
+			rq = list_entry_rq(entry);
+
+			if ((drq = RQ_DATA(rq)) == NULL)
+				continue;
+
+			rq->elevator_private = NULL;
+			kmem_cache_free(drq_pool, drq);
+		}
+	}
+
+	kfree(dd->hash);
+	kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data), and alloc a drq for
+ * each request on the free lists
+ */
+static int deadline_init(request_queue_t *q, elevator_t *e)
+{
+	struct deadline_data *dd;
+	struct deadline_rq *drq;
+	struct request *rq;
+	int i, ret = 0;
+
+	if (!drq_pool)
+		return -ENOMEM;
+
+	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd)
+		return -ENOMEM;
+	memset(dd, 0, sizeof(*dd));
+
+	dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL);
+	if (!dd->hash) {
+		kfree(dd);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < DL_HASH_ENTRIES; i++)
+		INIT_LIST_HEAD(&dd->hash[i]);
+
+	INIT_LIST_HEAD(&dd->read_fifo);
+	INIT_LIST_HEAD(&dd->sort_list[READ]);
+	INIT_LIST_HEAD(&dd->sort_list[WRITE]);
+	dd->dispatch = &q->queue_head;
+	dd->fifo_batch = fifo_batch;
+	dd->read_expire = read_expire;
+	dd->seek_cost = seek_cost;
+	dd->hash_valid_count = 1;
+	dd->writes_starved = writes_starved;
+	e->elevator_data = dd;
+
+	for (i = READ; i <= WRITE; i++) {
+		struct request_list *rl = &q->rq[i];
+		struct list_head *entry = &rl->free;
+
+		if (list_empty(&rl->free))
+			continue;
+	
+		while ((entry = entry->next) != &rl->free) {
+			rq = list_entry_rq(entry);
+
+			drq = kmem_cache_alloc(drq_pool, GFP_KERNEL);
+			if (!drq) {
+				ret = -ENOMEM;
+				break;
+			}
+
+			memset(drq, 0, sizeof(*drq));
+			INIT_LIST_HEAD(&drq->fifo);
+			INIT_LIST_HEAD(&drq->hash);
+			drq->request = rq;
+			rq->elevator_private = drq;
+		}
+	}
+
+	if (ret)
+		deadline_exit(q, e);
+
+	return ret;
+}
+
+static int __init deadline_slab_setup(void)
+{
+	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
+				     0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+	if (!drq_pool)
+		panic("deadline: can't init slab pool\n");
+
+	return 0;
+}
+
+module_init(deadline_slab_setup);
+
+elevator_t iosched_deadline = {
+	.elevator_merge_fn = 		deadline_merge,
+	.elevator_merge_req_fn =	deadline_merge_request,
+	.elevator_next_req_fn =		deadline_next_request,
+	.elevator_add_req_fn =		deadline_add_request,
+	.elevator_remove_req_fn =	deadline_remove_request,
+	.elevator_queue_empty_fn =	deadline_queue_empty,
+	.elevator_get_sort_head_fn =	deadline_get_sort_head,
+	.elevator_init_fn =		deadline_init,
+	.elevator_exit_fn =		deadline_exit,
+};
+
+EXPORT_SYMBOL(iosched_deadline);
diff -Nru a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c	Wed Sep 25 19:16:26 2002
+++ b/drivers/block/ll_rw_blk.c	Wed Sep 25 19:16:26 2002
@@ -1175,7 +1175,7 @@
 	if (blk_init_free_list(q))
 		return -ENOMEM;
 
-	if ((ret = elevator_init(q, &q->elevator, elevator_linus))) {
+	if ((ret = elevator_init(q, &q->elevator, iosched_deadline))) {
 		blk_cleanup_queue(q);
 		return ret;
 	}
diff -Nru a/include/linux/elevator.h b/include/linux/elevator.h
--- a/include/linux/elevator.h	Wed Sep 25 19:16:26 2002
+++ b/include/linux/elevator.h	Wed Sep 25 19:16:26 2002
@@ -60,6 +60,12 @@
 #define ELV_LINUS_SEEK_COST	16
 
 /*
+ * deadline i/o scheduler. uses request time outs to prevent indefinite
+ * starvation
+ */
+extern elevator_t iosched_deadline;
+
+/*
  * use the /proc/iosched interface, all the below is history ->
  */
 typedef struct blkelv_ioctl_arg_s {


* Re: [PATCH] deadline io scheduler
  2002-09-25 17:20 [PATCH] deadline io scheduler Jens Axboe
@ 2002-09-26  6:15 ` Andrew Morton
  2002-09-26  6:27   ` David S. Miller
                     ` (2 more replies)
  2002-09-30  7:45 ` Pavel Machek
  1 sibling, 3 replies; 42+ messages in thread
From: Andrew Morton @ 2002-09-26  6:15 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Linux Kernel


This is looking good.   With a little more tuning and tweaking
this problem is solved.

The horror test was:

	cd /usr/src/linux
	dd if=/dev/zero of=foo bs=1M count=4000
	sleep 5
	time cat kernel/*.c > /dev/null

Testing on IDE (this matters - SCSI is very different)

- On 2.5.38 + souped-up VM it was taking 25 seconds.

- My read-latency patch took 1 second-odd.

- Linus' rework yesterday was taking 0.3 seconds.

- With Linus' current tree (with the deadline scheduler) it now takes
  5 seconds.

Let's see what happens as we vary read_expire:

	read_expire (ms)	time cat kernel/*.c (secs)
		500			5.2
		400			3.8
		300			4.5
		200			3.9
		100			5.1
		 50			5.0

well that was a bit of a placebo ;)

Let's leave read_expire at 500ms and diddle writes_starved:

	writes_starved (units)	time cat kernel/*.c (secs)
		 1			4.8
		 2			4.4
		 4			4.0
		 8			4.9
		16			4.9


Now alter fifo_batch, everything else default:

	fifo_batch (units)	time cat kernel/*.c (secs)
		64			5.0
		32			2.0
		16			0.2
		 8			0.17

OK, that's a winner.


Here's something really nice with the deadline scheduler.  I was
madly catting five separate kernel trees (five reading processes)
and then started a big `dd', tunables at default:

   procs                      memory      swap          io     system      cpu
 r  b  w   swpd   free   buff  cache   si   so    bi    bo   in    cs us sy id
 0  9  0   6008   2460   8304 324716    0    0  2048     0 1102   254 13 88  0
 0  7  0   6008   2600   8288 324480    0    0  1800     0 1114   266  0 100  0
 0  6  0   6008   2452   8292 324520    0    0  2432     0 1126   287 29 71  0
 0  6  0   6008   3160   8292 323952    0    0  3568     0 1132   312  0 100  0
 0  6  0   6008   2860   8296 324148  128    0  2984     0 1119   281 17 83  0
 1  6  0   5984   2856   8264 323816  352    0  5240     0 1162   479  0 100  0
 0  7  1   5984   4152   7876 324068    0    0  1648 28192 1215  1572  1 99  0
 0  9  2   6016   3136   7300 328568    0  180  1232 37248 1324  1201  3 97  0
 0  9  2   6020   5260   5628 329212    0    4  1112 29488 1296   560  0 100  0
 0  9  3   6020   3548   5596 330944    0    0  1064 35240 1302   629  6 94  0
 0  9  3   6020   3412   5572 331352    0    0   744 31744 1298   452  6 94  0
 0  9  2   6020   1516   5576 333352    0    0   888 31488 1283   467  0 100  0
 0  9  2   6020   3528   5580 331396    0    0  1312 20768 1251   385  0 100  0

Note how the read rate maybe halved, and we sustained a high
volume of writeback.  This is excellent.


Let's try it again with fifo_batch at 16:

 0  5  0     80 303936   3960  49288    0    0  2520     0 1092   174  0 100  0
 0  5  0     80 302400   3996  50776    0    0  3040     0 1094   172 20 80  0
 0  5  0     80 301164   4032  51988    0    0  2504     0 1082   150  0 100  0
 0  5  0     80 299708   4060  53412    0    0  2904     0 1084   149  0 100  0
 1  5  1     80 164640   4060 186784    0    0  1344 26720 1104   891  1 99  0
 0  6  2     80 138900   4060 212088    0    0   280  7928 1039   226  0 100  0
 0  6  2     80 134992   4064 215928    0    0  1512  7704 1100   226  0 100  0
 0  6  2     80 130880   4068 219976    0    0  1928  9688 1124   245 17 83  0
 0  6  2     80 123316   4084 227432    0    0  2664  8200 1125   283 11 89  0

That looks acceptable.  Writes took quite a bit of punishment, but
the VM should cope with that OK.

It'd be interesting to know why read_expire and writes_starved have
no effect, while fifo_batch has a huge effect.

I'd like to gain a solid understanding of what these three knobs do.
Could you explain that a little more?

During development I'd suggest the below patch, to add
/proc/sys/vm/read_expire, fifo_batch and writes_starved - it beats
recompiling each time.
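(With it applied the knobs can then be poked at run time, e.g. something
like echo 16 > /proc/sys/vm/fifo_batch.)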

I'll test scsi now.



 drivers/block/deadline-iosched.c |   18 +++++++++---------
 kernel/sysctl.c                  |   12 ++++++++++++
 2 files changed, 21 insertions(+), 9 deletions(-)

--- 2.5.38/drivers/block/deadline-iosched.c~akpm-deadline	Wed Sep 25 22:16:36 2002
+++ 2.5.38-akpm/drivers/block/deadline-iosched.c	Wed Sep 25 23:05:45 2002
@@ -24,14 +24,14 @@
  * fifo_batch is how many steps along the sorted list we will take when the
  * front fifo request expires.
  */
-static int read_expire = HZ / 2;	/* 500ms start timeout */
-static int fifo_batch = 64;		/* 4 seeks, or 64 contig */
-static int seek_cost = 16;		/* seek is 16 times more expensive */
+int read_expire = HZ / 2;	/* 500ms start timeout */
+int fifo_batch = 64;		/* 4 seeks, or 64 contig */
+int seek_cost = 16;		/* seek is 16 times more expensive */
 
 /*
  * how many times reads are allowed to starve writes
  */
-static int writes_starved = 2;
+int writes_starved = 2;
 
 static const int deadline_hash_shift = 8;
 #define DL_HASH_BLOCK(sec)	((sec) >> 3)
@@ -253,7 +253,7 @@ static void deadline_move_requests(struc
 {
 	struct list_head *sort_head = &dd->sort_list[rq_data_dir(rq)];
 	sector_t last_sec = dd->last_sector;
-	int batch_count = dd->fifo_batch;
+	int batch_count = fifo_batch;
 
 	do {
 		struct list_head *nxt = rq->queuelist.next;
@@ -267,7 +267,7 @@ static void deadline_move_requests(struc
 		if (rq->sector == last_sec)
 			batch_count--;
 		else
-			batch_count -= dd->seek_cost;
+			batch_count -= seek_cost;
 
 		if (nxt == sort_head)
 			break;
@@ -319,7 +319,7 @@ dispatch:
 	 * if we have expired entries on the fifo list, move some to dispatch
 	 */
 	if (deadline_check_fifo(dd)) {
-		if (writes && (dd->starved++ >= dd->writes_starved))
+		if (writes && (dd->starved++ >= writes_starved))
 			goto dispatch_writes;
 
 		nxt = dd->read_fifo.next;
@@ -329,7 +329,7 @@ dispatch:
 	}
 
 	if (!list_empty(&dd->sort_list[READ])) {
-		if (writes && (dd->starved++ >= dd->writes_starved))
+		if (writes && (dd->starved++ >= writes_starved))
 			goto dispatch_writes;
 
 		nxt = dd->sort_list[READ].next;
@@ -392,7 +392,7 @@ deadline_add_request(request_queue_t *q,
 		/*
 		 * set expire time and add to fifo list
 		 */
-		drq->expires = jiffies + dd->read_expire;
+		drq->expires = jiffies + read_expire;
 		list_add_tail(&drq->fifo, &dd->read_fifo);
 	}
 }
--- 2.5.38/kernel/sysctl.c~akpm-deadline	Wed Sep 25 22:59:48 2002
+++ 2.5.38-akpm/kernel/sysctl.c	Wed Sep 25 23:05:42 2002
@@ -272,6 +272,9 @@ static int zero = 0;
 static int one = 1;
 static int one_hundred = 100;
 
+extern int fifo_batch;
+extern int read_expire;
+extern int writes_starved;
 
 static ctl_table vm_table[] = {
 	{VM_OVERCOMMIT_MEMORY, "overcommit_memory", &sysctl_overcommit_memory,
@@ -314,6 +317,15 @@ static ctl_table vm_table[] = {
 	 {VM_HUGETLB_PAGES, "nr_hugepages", &htlbpage_max, sizeof(int), 0644, NULL, 
 	  &proc_dointvec},
 #endif
+	{90, "read_expire",
+	 &read_expire, sizeof(read_expire), 0644,
+	 NULL, &proc_dointvec},
+	{91, "fifo_batch",
+	 &fifo_batch, sizeof(fifo_batch), 0644,
+	 NULL, &proc_dointvec},
+	{92, "writes_starved",
+	 &writes_starved, sizeof(writes_starved), 0644,
+	 NULL, &proc_dointvec},
 	{0}
 };
 

.


* Re: [PATCH] deadline io scheduler
  2002-09-26  6:15 ` Andrew Morton
@ 2002-09-26  6:27   ` David S. Miller
  2002-09-26  6:44   ` Jens Axboe
  2002-09-26  7:12   ` Andrew Morton
  2 siblings, 0 replies; 42+ messages in thread
From: David S. Miller @ 2002-09-26  6:27 UTC (permalink / raw)
  To: akpm; +Cc: axboe, linux-kernel

   From: Andrew Morton <akpm@digeo.com>
   Date: Wed, 25 Sep 2002 23:15:58 -0700
   
   I'd like to gain a solid understanding of what these three knobs do.
   Could you explain that a little more?

My basic understanding of fifo_batch is:

1) fifo_batch is how many contiguous requests can be in
   a "set"

2) we send out one write "set" for every two read "sets"

3) a seek works out to "seek_cost" contiguous requests,
   cost-wise; this gets subtracted from how many requests
   the current "set" has left that are allowed to be used
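
In other words, with the posted defaults (fifo_batch=64, seek_cost=16)
one "set" can cover 4 seeky requests, or 64 contiguous ones, or a mix,
e.g. 2 seeks (costing 32) plus roughly 32 contiguous requests before the
budget is used up.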


* Re: [PATCH] deadline io scheduler
  2002-09-26  6:15 ` Andrew Morton
  2002-09-26  6:27   ` David S. Miller
@ 2002-09-26  6:44   ` Jens Axboe
  2002-09-26  6:59     ` Jens Axboe
  2002-09-26  8:28     ` Daniel Pittman
  2002-09-26  7:12   ` Andrew Morton
  2 siblings, 2 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-26  6:44 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Kernel

On Wed, Sep 25 2002, Andrew Morton wrote:
> 
> This is looking good.   With a little more tuning and tweaking
> this problem is solved.
> 
> The horror test was:
> 
> 	cd /usr/src/linux
> 	dd if=/dev/zero of=foo bs=1M count=4000
> 	sleep 5
> 	time cat kernel/*.c > /dev/null
> 
> Testing on IDE (this matters - SCSI is very different)

Yes, SCSI specific stuff comes next.

> - On 2.5.38 + souped-up VM it was taking 25 seconds.
> 
> - My read-latency patch took 1 second-odd.
> 
> - Linus' rework yesterday was taking 0.3 seconds.
> 
> - With Linus' current tree (with the deadline scheduler) it now takes
>   5 seconds.
> 
> Let's see what happens as we vary read_expire:
> 
> 	read_expire (ms)	time cat kernel/*.c (secs)
> 		500			5.2
> 		400			3.8
> 		300			4.5
> 		200			3.9
> 		100			5.1
> 		 50			5.0
> 
> well that was a bit of a placebo ;)

For this work load, more on that later.

> Let's leave read_expire at 500ms and diddle writes_starved:
> 
> 	writes_starved (units)	time cat kernel/*.c (secs)
> 		 1			4.8
> 		 2			4.4
> 		 4			4.0
> 		 8			4.9
> 		16			4.9

Interesting

> Now alter fifo_batch, everything else default:
> 
> 	fifo_batch (units)	time cat kernel/*.c (secs)
> 		64			5.0
> 		32			2.0
> 		16			0.2
> 		 8			0.17
> 
> OK, that's a winner.

Cool, I'm re-running benchmarks with 16 as the default now. I fear this
might be too aggressive, and that 32 will be a decent value.

> Here's something really nice with the deadline scheduler.  I was
> madly catting five separate kernel trees (five reading processes)
> and then started a big `dd', tunables at default:
> 
>    procs                      memory      swap          io     system      cpu
>  r  b  w   swpd   free   buff  cache   si   so    bi    bo   in    cs us sy id
>  0  9  0   6008   2460   8304 324716    0    0  2048     0 1102   254 13 88  0
>  0  7  0   6008   2600   8288 324480    0    0  1800     0 1114   266  0 100  0
>  0  6  0   6008   2452   8292 324520    0    0  2432     0 1126   287 29 71  0
>  0  6  0   6008   3160   8292 323952    0    0  3568     0 1132   312  0 100  0
>  0  6  0   6008   2860   8296 324148  128    0  2984     0 1119   281 17 83  0
>  1  6  0   5984   2856   8264 323816  352    0  5240     0 1162   479  0 100  0
>  0  7  1   5984   4152   7876 324068    0    0  1648 28192 1215  1572  1 99  0
>  0  9  2   6016   3136   7300 328568    0  180  1232 37248 1324  1201  3 97  0
>  0  9  2   6020   5260   5628 329212    0    4  1112 29488 1296   560  0 100  0
>  0  9  3   6020   3548   5596 330944    0    0  1064 35240 1302   629  6 94  0
>  0  9  3   6020   3412   5572 331352    0    0   744 31744 1298   452  6 94  0
>  0  9  2   6020   1516   5576 333352    0    0   888 31488 1283   467  0 100  0
>  0  9  2   6020   3528   5580 331396    0    0  1312 20768 1251   385  0 100  0
> 
> Note how the read rate maybe halved, and we sustained a high
> volume of writeback.  This is excellent.

Yep

> Let's try it again with fifo_batch at 16:
> 
>  0  5  0     80 303936   3960  49288    0    0  2520     0 1092   174  0 100  0
>  0  5  0     80 302400   3996  50776    0    0  3040     0 1094   172 20 80  0
>  0  5  0     80 301164   4032  51988    0    0  2504     0 1082   150  0 100  0
>  0  5  0     80 299708   4060  53412    0    0  2904     0 1084   149  0 100  0
>  1  5  1     80 164640   4060 186784    0    0  1344 26720 1104   891  1 99  0
>  0  6  2     80 138900   4060 212088    0    0   280  7928 1039   226  0 100  0
>  0  6  2     80 134992   4064 215928    0    0  1512  7704 1100   226  0 100  0
>  0  6  2     80 130880   4068 219976    0    0  1928  9688 1124   245 17 83  0
>  0  6  2     80 123316   4084 227432    0    0  2664  8200 1125   283 11 89  0
> 
> That looks acceptable.  Writes took quite a bit of punishment, but
> the VM should cope with that OK.
> 
> It'd be interesting to know why read_expire and writes_starved have
> no effect, while fifo_batch has a huge effect.
> 
> I'd like to gain a solid understanding of what these three knobs do.
> Could you explain that a little more?

Sure. The reason you are not seeing a big change with read expire is
that you basically only have one thread issuing reads. Once you start
flooding the queue with more threads doing reads, read expire just
puts a lid on the max latency that will be incurred. So you are probably not
hitting the read expire logic at all, or only slightly.

The three tunables are:

read_expire. This one controls how old a request can be before we
attempt to move it to the dispatch queue. This is the starvation logic
for the read list. When a read expires, the other knobs control what the
behaviour is.

fifo_batch. This one controls how big a batch of requests we move from
the sort lists to the dispatch queue. The idea was that we don't want to
move single requests, since that might cause seek storms. Instead we
move a batch of requests, starting at the expire head for reads if
necessary, along the sorted list to the dispatch queue. fifo_batch is
the total cost that can be endured, a total of seeks and non-seeky
requests. With your fifo_batch at 16, we can only move one seeky request
to the dispatch queue. Or we can move 16 non-seeky requests. Or a few
non-seeky requests, and a seeky one. You get the idea.
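
If it helps, here is a tiny stand-alone toy model of that accounting.
This is not the patch code (deadline_move_requests() is the real thing),
just user space C you can compile and play with; the request stream is
made up:

#include <stdio.h>

int main(void)
{
	/* the values discussed above (fifo_batch=16, default seek_cost=16) */
	int fifo_batch = 16, seek_cost = 16;

	/* toy stream: 1 = contiguous with the previous request, 0 = seeky */
	int contig[] = { 1, 1, 1, 0, 1, 1 };
	int nr = sizeof(contig) / sizeof(contig[0]);
	int batch_count = fifo_batch, moved = 0, i;

	for (i = 0; i < nr && batch_count > 0; i++) {
		moved++;
		batch_count -= contig[i] ? 1 : seek_cost;
	}

	printf("moved %d of %d requests, budget left %d\n",
	       moved, nr, batch_count);
	return 0;
}

That prints "moved 4 of 6 requests, budget left -3": three contiguous
requests and then the seeky one that ends the batch.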

writes_starved. This controls how many times reads get preferred over
writes. The default is 2, which means that we can serve two batches of
reads over one write batch. A value of 4 would mean that reads could
skip ahead of writes 4 times. A value of 1 would give you 1:1
read:write, ie no read preference. A silly value of 0 would give you
write preference, always.

Hope this helps?

> During development I'd suggest the below patch, to add
> /proc/sys/vm/read_expire, fifo_batch and writes_starved - it beats
> recompiling each time.

It sure does. I either want to talk Al into making the ioschedfs (a better
name will be selected :-) or try to do it myself so we can do this
properly.

> I'll test scsi now.

Cool. I found a buglet that causes incorrect accounting when moving
requests if the dispatch queue is not empty. Attached.

===== drivers/block/deadline-iosched.c 1.1 vs edited =====
--- 1.1/drivers/block/deadline-iosched.c	Wed Sep 25 21:16:26 2002
+++ edited/drivers/block/deadline-iosched.c	Thu Sep 26 08:33:35 2002
@@ -254,6 +254,15 @@
 	struct list_head *sort_head = &dd->sort_list[rq_data_dir(rq)];
 	sector_t last_sec = dd->last_sector;
 	int batch_count = dd->fifo_batch;
+
+	/*
+	 * if dispatch is non-empty, disregard last_sector and check last one
+	 */
+	if (!list_empty(dd->dispatch)) {
+		struct request *__rq = list_entry_rq(dd->dispatch->prev);
+
+		last_sec = __rq->sector + __rq->nr_sectors;
+	}
 
 	do {
 		struct list_head *nxt = rq->queuelist.next;

-- 
Jens Axboe



* Re: [PATCH] deadline io scheduler
  2002-09-26  6:44   ` Jens Axboe
@ 2002-09-26  6:59     ` Jens Axboe
  2002-09-26  7:06       ` William Lee Irwin III
  2002-09-26 15:54       ` Patrick Mansfield
  2002-09-26  8:28     ` Daniel Pittman
  1 sibling, 2 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-26  6:59 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Kernel

On Thu, Sep 26 2002, Jens Axboe wrote:
> > Now alter fifo_batch, everything else default:
> > 
> > 	fifo_batch (units)	time cat kernel/*.c (secs)
> > 		64			5.0
> > 		32			2.0
> > 		16			0.2
> > 		 8			0.17
> > 
> > OK, that's a winner.
> 
> Cool, I'm re-running benchmarks with 16 as the default now. I fear this
> might be too aggressive, and that 32 will be a decent value.

fifo_batch=16 drops throughput slightly on tiobench; however, it also
gives really really good interactive behaviour here. Using 32 doesn't
change that a whole lot, the throughput that is. This might just be
normal deviation between runs, more runs are needed to be sure. Note that
I'm testing with the last_sec patch I posted, you should too.

BTW, for SCSI, it would be nice to first convert more drivers to use the
block level queued tagging. That would provide us with a much better
means to control starvation properly on SCSI as well.

-- 
Jens Axboe



* Re: [PATCH] deadline io scheduler
  2002-09-26  6:59     ` Jens Axboe
@ 2002-09-26  7:06       ` William Lee Irwin III
  2002-09-26  7:06         ` David S. Miller
  2002-09-26  7:11         ` Jeff Garzik
  2002-09-26 15:54       ` Patrick Mansfield
  1 sibling, 2 replies; 42+ messages in thread
From: William Lee Irwin III @ 2002-09-26  7:06 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, Linux Kernel, patman, andmike

On Thu, Sep 26, 2002 at 08:59:51AM +0200, Jens Axboe wrote:
> BTW, for SCSI, it would be nice to first convert more drivers to use the
> block level queued tagging. That would provide us with a much better
> means to control starvation properly on SCSI as well.

Hmm, qlogicisp.c isn't really usable because the disks are too slow, it
needs bounce buffering, and nobody will touch the driver (and I don't
seem to be able to figure out what's going on with it myself), and the
FC stuff seems to need out-of-tree drivers to work. I wonder if some
help converting them to this might be found.


Thanks,
Bill


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:06       ` William Lee Irwin III
@ 2002-09-26  7:06         ` David S. Miller
  2002-09-26  7:16           ` Jeff Garzik
  2002-09-26  7:23           ` William Lee Irwin III
  2002-09-26  7:11         ` Jeff Garzik
  1 sibling, 2 replies; 42+ messages in thread
From: David S. Miller @ 2002-09-26  7:06 UTC (permalink / raw)
  To: wli; +Cc: axboe, akpm, linux-kernel, patman, andmike

   From: William Lee Irwin III <wli@holomorphy.com>
   Date: Thu, 26 Sep 2002 00:06:15 -0700
   
   Hmm, qlogicisp.c isn't really usable because the disks are too
   slow, it needs bounce buffering, and nobody will touch the driver

I think it's high time to blow away qlogic{fc,isp}.c and put
Matt Jacob's qlogic stuff into 2.5.x


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:06       ` William Lee Irwin III
  2002-09-26  7:06         ` David S. Miller
@ 2002-09-26  7:11         ` Jeff Garzik
  2002-09-26  7:14           ` William Lee Irwin III
  1 sibling, 1 reply; 42+ messages in thread
From: Jeff Garzik @ 2002-09-26  7:11 UTC (permalink / raw)
  To: William Lee Irwin III
  Cc: Jens Axboe, Andrew Morton, Linux Kernel, patman, andmike

William Lee Irwin III wrote:
> Hmm, qlogicisp.c isn't really usable because the disks are too slow, it
> needs bounce buffering, and nobody will touch the driver (and I don't
> seem to be able to figure out what's going on with it myself), and the
> FC stuff seems to need out-of-tree drivers to work. I wonder if some
> help converting them to this might be found.


I use this driver on my ancient ev56 Alpha, if you need me to do some 
testing.

Unfortunately it is fragile and known to have obscure bugs...   Compaq 
was beating up on this driver for quite a while, but I never saw 
anything but bandaids [and they fully admitted their fixes were bandaids].

There is an out-of-tree qlogic driver that is reported to be far better 
-- but not necessarily close to Linux kernel coding style.

/me wonders if people are encouraged or scared off, at this point...



* Re: [PATCH] deadline io scheduler
  2002-09-26  6:15 ` Andrew Morton
  2002-09-26  6:27   ` David S. Miller
  2002-09-26  6:44   ` Jens Axboe
@ 2002-09-26  7:12   ` Andrew Morton
  2002-09-26  7:17     ` Jens Axboe
  2002-09-26  7:34     ` Jens Axboe
  2 siblings, 2 replies; 42+ messages in thread
From: Andrew Morton @ 2002-09-26  7:12 UTC (permalink / raw)
  To: Jens Axboe, Linux Kernel

Andrew Morton wrote:
> 
> I'll test scsi now.
> 

aic7xxx, Fujitsu "MAF3364L SUN36G" (36G SCA-2)


Maximum number of TCQ tags=253

	fifo_batch		time cat kernel/*.c (seconds)
	    64				58
	    32				54
	    16				20
	     8				58
	     4				1:15
	     2				53

Maximum number of TCQ tags=4

	fifo_batch		time cat kernel/*.c (seconds)
	    64				53
	    32				39
	    16				33
	     8				21
	     4				22
	     2				36
	     1				22


Maximum number of TCQ tags = 0:

	fifo_batch		time cat kernel/*.c (seconds)
	    64				22
	    32				10.3
	    16				10.5
	     8				5.5
	     4				3.2
	     2				1.9

I selected fifo_batch=16 and altered writes_starved and read_expire
again.  They made no appreciable difference.

From this I can only conclude that my poor little read was stuck
in the disk for ages while TCQ busily allowed new incoming writes
to bypass already-sent reads.

A dreadful misdesign.  Unless we can control this with barriers,
and if Fujitsu is typical, TCQ is just uncontrollable.  I, for
one, would not turn it on in a pink fit.


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:16           ` Jeff Garzik
@ 2002-09-26  7:13             ` David S. Miller
  2002-09-26  7:33               ` Jeff Garzik
  0 siblings, 1 reply; 42+ messages in thread
From: David S. Miller @ 2002-09-26  7:13 UTC (permalink / raw)
  To: jgarzik; +Cc: wli, axboe, akpm, linux-kernel, patman, andmike

   From: Jeff Garzik <jgarzik@pobox.com>
   Date: Thu, 26 Sep 2002 03:16:32 -0400

   David S. Miller wrote:
   > I think it's high time to blow away qlogic{fc,isp}.c and put
   > Matt Jacob's qlogic stuff into 2.5.x
   
   Seconded.  Thanks for remembering that name.
   
No problem :)

   Has his stuff been cleaned up, code-wise, in the past few years?  My 
   experience with it was 100% positive from a technical standpoint, but 
   negative from a style standpoint...
   
I think it'll be less work to toss his stuff into the tree
and have some janitor whack on it than try to get someone
to maintain what we have now.


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:11         ` Jeff Garzik
@ 2002-09-26  7:14           ` William Lee Irwin III
  0 siblings, 0 replies; 42+ messages in thread
From: William Lee Irwin III @ 2002-09-26  7:14 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Jens Axboe, Andrew Morton, Linux Kernel, patman, andmike

On Thu, Sep 26, 2002 at 03:11:31AM -0400, Jeff Garzik wrote:
> I use this driver on my ancient ev56 Alpha, if you need me to do some 
> testing.
> Unfortunately it is fragile and known to have obscure bugs...   Compaq 
> was beating up on this driver for quite a while, but I never saw 
> anything but bandaids [and they fully admitted their fixes were bandaids].
> There is an out-of-tree qlogic driver that is reported to be far better 
> -- but not necessarily close to Linux kernel coding style.
> /me wonders if people are encouraged or scared off, at this point...

I've got no idea what's going on with it. It just happens to explode when
parallel mkfs's are done. It looks like there's a bug where it can walk
off the end of an array when it gets an unexpected message but fixing
that doesn't help.


Thanks,
Bill


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:06         ` David S. Miller
@ 2002-09-26  7:16           ` Jeff Garzik
  2002-09-26  7:13             ` David S. Miller
  2002-09-26  7:23           ` William Lee Irwin III
  1 sibling, 1 reply; 42+ messages in thread
From: Jeff Garzik @ 2002-09-26  7:16 UTC (permalink / raw)
  To: David S. Miller; +Cc: wli, axboe, akpm, linux-kernel, patman, andmike

David S. Miller wrote:
>    From: William Lee Irwin III <wli@holomorphy.com>
>    Date: Thu, 26 Sep 2002 00:06:15 -0700
>    
>    Hmm, qlogicisp.c isn't really usable because the disks are too
>    slow, it needs bounce buffering, and nobody will touch the driver
> 
> I think it's high time to blow away qlogic{fc,isp}.c and put
> Matt Jacob's qlogic stuff into 2.5.x


Seconded.  Thanks for remembering that name.

Has his stuff been cleaned up, code-wise, in the past few years?  My 
experience with it was 100% positive from a technical standpoint, but 
negative from a style standpoint...

	Jeff, volunteering to test the QL-ISP





* Re: [PATCH] deadline io scheduler
  2002-09-26  7:12   ` Andrew Morton
@ 2002-09-26  7:17     ` Jens Axboe
  2002-09-26  7:34     ` Jens Axboe
  1 sibling, 0 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-26  7:17 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Kernel

On Thu, Sep 26 2002, Andrew Morton wrote:
> Andrew Morton wrote:
> > 
> > I'll test scsi now.
> > 
> 
> aic7xxx, Fujitsu "MAF3364L SUN36G" (36G SCA-2)
> 
> 
> Maximum number of TCQ tags=253
> 
> 	fifo_batch		time cat kernel/*.c (seconds)
> 	    64				58
> 	    32				54
> 	    16				20
> 	     8				58
> 	     4				1:15
> 	     2				53
> 
> Maximum number of TCQ tags=4
> 
> 	fifo_batch		time cat kernel/*.c (seconds)
> 	    64				53
> 	    32				39
> 	    16				33
> 	     8				21
> 	     4				22
> 	     2				36
> 	     1				22
> 
> 
> Maximum number of TCQ tags = 0:
> 
> 	fifo_batch		time cat kernel/*.c (seconds)
> 	    64				22
> 	    32				10.3
> 	    16				10.5
> 	     8				5.5
> 	     4				3.2
> 	     2				1.9
> 
> I selected fifo_batch=16 and altered writes_starved and read_expire
> again.  They made no appreciable difference.

Abysmal. BTW, a fifo_batch value less than the seek cost doesn't make too much
sense, unless the drive has really slow streaming io performance.
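(With seek_cost at 16, a budget below that means any seeky request ends
the batch on the spot, so the value then mostly just caps how long a run
of contiguous requests gets streamed in one go.)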

> From this I can only conclude that my poor little read was stuck
> in the disk for ages while TCQ busily allowed new incoming writes
> to bypass already-sent reads.
> 
> A dreadful misdesign.  Unless we can control this with barriers,
> and if Fujitsu is typical, TCQ is just uncontrollable.  I, for
> one, would not turn it on in a pink fit.

I have this dream that we might be able to control this if we get our
hands on the queueing at the block level. The above looks really really
bad though, in the past I've had quite good experience with a tag depth
of 4. I should try ide tcq again, to see how that goes.

-- 
Jens Axboe



* Re: [PATCH] deadline io scheduler
  2002-09-26  7:06         ` David S. Miller
  2002-09-26  7:16           ` Jeff Garzik
@ 2002-09-26  7:23           ` William Lee Irwin III
  1 sibling, 0 replies; 42+ messages in thread
From: William Lee Irwin III @ 2002-09-26  7:23 UTC (permalink / raw)
  To: David S. Miller; +Cc: axboe, akpm, linux-kernel, patman, andmike

From: William Lee Irwin III <wli@holomorphy.com>
Date: Thu, 26 Sep 2002 00:06:15 -0700
>    Hmm, qlogicisp.c isn't really usable because the disks are too
>    slow, it needs bounce buffering, and nobody will touch the driver

On Thu, Sep 26, 2002 at 12:06:20AM -0700, David S. Miller wrote:
> I think it's high time to blow away qlogic{fc,isp}.c and put
> Matt Jacob's qlogic stuff into 2.5.x

Is this different from the v61b5 stuff? I can test it on my qla2310
and ISP1020 if need be.

The main issue with qlogicisp.c is that it's just not modern enough to
keep up with the rest of the system so testing with it is basically a
stress test for how things hold up with lots of highmem, lots of bounce
buffering and with a severely limited ability to perform disk I/O.

qlogicisp.c is also not very reflective of the hardware used in NUMA-Q
systems in the field, it just happened to be available from the scrap heap.


Thanks,
Bill


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:13             ` David S. Miller
@ 2002-09-26  7:33               ` Jeff Garzik
  2002-09-26  7:35                 ` David S. Miller
  2002-09-26  7:41                 ` Jeff Garzik
  0 siblings, 2 replies; 42+ messages in thread
From: Jeff Garzik @ 2002-09-26  7:33 UTC (permalink / raw)
  To: David S. Miller; +Cc: wli, axboe, akpm, linux-kernel, patman, andmike

David S. Miller wrote:
> I think it'll be less work to toss his stuff into the tree
> and have some janitor whack on it than try to get someone
> to maintain what we have now.


Does that mean you're volunteering to throw it into the tree? ;-)

Just dug up the URL, in case anybody is interested:
http://www.feral.com/isp.html



* Re: [PATCH] deadline io scheduler
  2002-09-26  7:12   ` Andrew Morton
  2002-09-26  7:17     ` Jens Axboe
@ 2002-09-26  7:34     ` Jens Axboe
  1 sibling, 0 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-26  7:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Kernel

Hi,

I found a small problem where the hash would not contain the right request
state. Basically we updated the hash too soon; this bug was introduced
when the merge_cleanup stuff was removed.

It's not a big deal, it just means that the hash didn't catch as many
merges as it should. However, for efficiency it needs to be correct, of
course :-)

Current deadline against 2.5.38-BK attached.

===== drivers/block/deadline-iosched.c 1.1 vs edited =====
--- 1.1/drivers/block/deadline-iosched.c	Wed Sep 25 21:16:26 2002
+++ edited/drivers/block/deadline-iosched.c	Thu Sep 26 09:24:39 2002
@@ -25,7 +25,7 @@
  * front fifo request expires.
  */
 static int read_expire = HZ / 2;	/* 500ms start timeout */
-static int fifo_batch = 64;		/* 4 seeks, or 64 contig */
+static int fifo_batch = 32;		/* 4 seeks, or 64 contig */
 static int seek_cost = 16;		/* seek is 16 times more expensive */
 
 /*
@@ -164,7 +164,7 @@
 			*req = __rq;
 			q->last_merge = &__rq->queuelist;
 			ret = ELEVATOR_BACK_MERGE;
-			goto out_ret;
+			goto out;
 		}
 	}
 
@@ -198,16 +198,18 @@
 	}
 
 out:
-	if (ret != ELEVATOR_NO_MERGE) {
-		struct deadline_rq *drq = RQ_DATA(*req);
-
-		deadline_del_rq_hash(drq);
-		deadline_add_rq_hash(dd, drq);
-	}
-out_ret:
 	return ret;
 }
 
+static void deadline_merged_request(request_queue_t *q, struct request *req)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+	struct deadline_rq *drq = RQ_DATA(req);
+
+	deadline_del_rq_hash(drq);
+	deadline_add_rq_hash(dd, drq);
+}
+
 static void
 deadline_merge_request(request_queue_t *q, struct request *req, struct request *next)
 {
@@ -255,6 +257,15 @@
 	sector_t last_sec = dd->last_sector;
 	int batch_count = dd->fifo_batch;
 
+	/*
+	 * if dispatch is non-empty, disregard last_sector and check last one
+	 */
+	if (!list_empty(dd->dispatch)) {
+		struct request *__rq = list_entry_rq(dd->dispatch->prev);
+
+		last_sec = __rq->sector + __rq->nr_sectors;
+	}
+
 	do {
 		struct list_head *nxt = rq->queuelist.next;
 
@@ -544,6 +555,7 @@
 
 elevator_t iosched_deadline = {
 	.elevator_merge_fn = 		deadline_merge,
+	.elevator_merged_fn =		deadline_merged_request,
 	.elevator_merge_req_fn =	deadline_merge_request,
 	.elevator_next_req_fn =		deadline_next_request,
 	.elevator_add_req_fn =		deadline_add_request,
===== drivers/block/elevator.c 1.27 vs edited =====
--- 1.27/drivers/block/elevator.c	Thu Sep 26 08:23:11 2002
+++ edited/drivers/block/elevator.c	Thu Sep 26 09:20:03 2002
@@ -250,6 +250,14 @@
 	return ELEVATOR_NO_MERGE;
 }
 
+void elv_merged_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = &q->elevator;
+
+	if (e->elevator_merged_fn)
+		e->elevator_merged_fn(q, rq);
+}
+
 void elv_merge_requests(request_queue_t *q, struct request *rq,
 			     struct request *next)
 {
===== drivers/block/ll_rw_blk.c 1.111 vs edited =====
--- 1.111/drivers/block/ll_rw_blk.c	Thu Sep 26 08:23:11 2002
+++ edited/drivers/block/ll_rw_blk.c	Thu Sep 26 09:23:05 2002
@@ -1606,6 +1606,7 @@
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 			drive_stat_acct(req, nr_sectors, 0);
+			elv_merged_request(q, req);
 			attempt_back_merge(q, req);
 			goto out;
 
@@ -1629,6 +1630,7 @@
 			req->sector = req->hard_sector = sector;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 			drive_stat_acct(req, nr_sectors, 0);
+			elv_merged_request(q, req);
 			attempt_front_merge(q, req);
 			goto out;
 
===== include/linux/elevator.h 1.14 vs edited =====
--- 1.14/include/linux/elevator.h	Thu Sep 26 08:23:11 2002
+++ edited/include/linux/elevator.h	Thu Sep 26 09:25:14 2002
@@ -6,6 +6,8 @@
 
 typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
 
+typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
+
 typedef struct request *(elevator_next_req_fn) (request_queue_t *);
 
 typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, struct list_head *);
@@ -19,6 +21,7 @@
 struct elevator_s
 {
 	elevator_merge_fn *elevator_merge_fn;
+	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 
 	elevator_next_req_fn *elevator_next_req_fn;
@@ -42,6 +45,7 @@
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
+extern void elv_merged_request(request_queue_t *, struct request *);
 extern void elv_remove_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);

-- 
Jens Axboe



* Re: [PATCH] deadline io scheduler
  2002-09-26  7:33               ` Jeff Garzik
@ 2002-09-26  7:35                 ` David S. Miller
  2002-09-26  8:15                   ` Michael Clark
  2002-09-26  7:41                 ` Jeff Garzik
  1 sibling, 1 reply; 42+ messages in thread
From: David S. Miller @ 2002-09-26  7:35 UTC (permalink / raw)
  To: jgarzik; +Cc: wli, axboe, akpm, linux-kernel, patman, andmike

   From: Jeff Garzik <jgarzik@pobox.com>
   Date: Thu, 26 Sep 2002 03:33:18 -0400
   
   Just dug up the URL, in case anybody is interested:
   http://www.feral.com/isp.html

Note there is a bitkeeper tree to pull from even :-)


* Re: [PATCH] deadline io scheduler
  2002-09-26  7:33               ` Jeff Garzik
  2002-09-26  7:35                 ` David S. Miller
@ 2002-09-26  7:41                 ` Jeff Garzik
  1 sibling, 0 replies; 42+ messages in thread
From: Jeff Garzik @ 2002-09-26  7:41 UTC (permalink / raw)
  To: linux-kernel; +Cc: David S. Miller, wli, axboe, akpm, patman, andmike

Jeff Garzik wrote:
> Just dug up the URL, in case anybody is interested:
> http://www.feral.com/isp.html

And I just noticed this:

The QLogic driver bundle is also now available under read-only BitKeeper 
(see http://www.bitkeeper.com for information). The BK URL is: 
bk://bitkeeper.feral.com:9002.



* Re: [PATCH] deadline io scheduler
  2002-09-26  7:35                 ` David S. Miller
@ 2002-09-26  8:15                   ` Michael Clark
  2002-09-26  8:18                     ` William Lee Irwin III
                                       ` (2 more replies)
  0 siblings, 3 replies; 42+ messages in thread
From: Michael Clark @ 2002-09-26  8:15 UTC (permalink / raw)
  To: David S. Miller; +Cc: jgarzik, wli, axboe, akpm, linux-kernel, patman, andmike



On 09/26/02 15:35, David S. Miller wrote:
>    From: Jeff Garzik <jgarzik@pobox.com>
>    Date: Thu, 26 Sep 2002 03:33:18 -0400
>    
>    Just dug up the URL, in case anybody is interested:
>    http://www.feral.com/isp.html

Would be nice to have a stable qlogic driver in the main kernel.

Although last time I tried Matt Jacob's driver, it locked up
after 30 seconds of running bonnie. At least with Qlogic's
driver I can run bonnie and cerberus continuously for 2 weeks
with no problems (although this may have been because
Matt's driver ignored the command queue throttle set in the
qlogic card's BIOS).

> Note there is a bitkeeper tree to pull from even :-)

The qlogic HBAs are a real problem in choosing which driver
to use out of:

in kernel qlogicfc
Qlogic's qla2x00 v4.x, v5.x, v6.x
Matthew Jacob's isp_mod

What are people out there using with their QLA 2200/2300s?

~mc



* Re: [PATCH] deadline io scheduler
  2002-09-26  8:15                   ` Michael Clark
@ 2002-09-26  8:18                     ` William Lee Irwin III
  2002-09-26 17:41                     ` Mike Anderson
  2002-09-26 20:21                     ` Thomas Tonino
  2 siblings, 0 replies; 42+ messages in thread
From: William Lee Irwin III @ 2002-09-26  8:18 UTC (permalink / raw)
  To: Michael Clark
  Cc: David S. Miller, jgarzik, axboe, akpm, linux-kernel, patman, andmike

On Thu, Sep 26, 2002 at 04:15:02PM +0800, Michael Clark wrote:
> What are people out there using with their QLA 2200/2300s?
> ~mc

I'm using the qla 61b5 release of the qla2xxx on a qla2310.
I've not tried Matt Jacob's drivers.


Cheers,
Bill


* Re: [PATCH] deadline io scheduler
  2002-09-26  6:44   ` Jens Axboe
  2002-09-26  6:59     ` Jens Axboe
@ 2002-09-26  8:28     ` Daniel Pittman
  2002-09-26  8:29       ` Jens Axboe
  2002-09-26 15:09       ` Rik van Riel
  1 sibling, 2 replies; 42+ messages in thread
From: Daniel Pittman @ 2002-09-26  8:28 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Linux Kernel

On Thu, 26 Sep 2002, Jens Axboe wrote:
> On Wed, Sep 25 2002, Andrew Morton wrote:

[...]

> writes_starved. This controls how many times reads get preferred over
> writes. The default is 2, which means that we can serve two batches of
> reads over one write batch. A value of 4 would mean that reads could
> skip ahead of writes 4 times. A value of 1 would give you 1:1
> read:write, ie no read preference. A silly value of 0 would give you
> write preference, always.

Actually, a value of zero doesn't sound completely silly to me, right
now, since I have been doing a lot of thinking about video capture
recently.

How much is it going to hurt a filesystem like ext[23] if that value is
set to zero while doing large streaming writes -- something like
(almost) uncompressed video at ten to twenty meg a second, for
gigabytes?

This is a situation where, for a dedicated machine, delaying reads
almost forever is actually a valuable thing. At least, valuable until it
stops the writes from being able to proceed.

      Daniel

-- 
The best way to get a bad law repealed is to enforce it strictly.
        -- Abraham Lincoln

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  8:28     ` Daniel Pittman
@ 2002-09-26  8:29       ` Jens Axboe
  2002-09-26 23:23         ` Daniel Pittman
  2002-09-26 15:09       ` Rik van Riel
  1 sibling, 1 reply; 42+ messages in thread
From: Jens Axboe @ 2002-09-26  8:29 UTC (permalink / raw)
  To: Daniel Pittman; +Cc: Linux Kernel

On Thu, Sep 26 2002, Daniel Pittman wrote:
> On Thu, 26 Sep 2002, Jens Axboe wrote:
> > On Wed, Sep 25 2002, Andrew Morton wrote:
> 
> [...]
> 
> > writes_starved. This controls how many times reads get preferred over
> > writes. The default is 2, which means that we can serve two batches of
> > reads over one write batch. A value of 4 would mean that reads could
> > skip ahead of writes 4 times. A value of 1 would give you 1:1
> > read:write, ie no read preference. A silly value of 0 would give you
> > write preference, always.
> 
> Actually, a value of zero doesn't sound completely silly to me, right
> now, since I have been doing a lot of thinking about video capture
> recently.
> 
> How much is it going to hurt a filesystem like ext[23] if that value is
> set to zero while doing large streaming writes -- something like
> (almost) uncompressed video at ten to twenty meg a second, for
> gigabytes?

You are going to stall all reads indefinitely :-)

> This is a situation where, for a dedicated machine, delaying reads
> almost forever is actually a valuable thing. At least, valuable until it
> stops the writes from being able to proceed.

Well, 0 should achieve that quite fine.
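
For the curious, here is a minimal, purely hypothetical sketch (invented
names, not the code from this patch) of what a writes_starved check can
look like when picking the next batch to dispatch:

/*
 * Hypothetical sketch only, not taken from the deadline-iosched patch.
 * The starved counter tracks how many read batches have run since the
 * last write batch; once it reaches writes_starved, writes get a turn.
 * With writes_starved = 0 the read branch never wins while writes are
 * pending, so reads stall behind a sustained write stream.
 */
static int choose_data_dir(int reads_pending, int writes_pending,
                           int *starved, int writes_starved)
{
	if (reads_pending &&
	    (!writes_pending || *starved < writes_starved)) {
		(*starved)++;
		return READ;	/* kernel's usual data direction constant */
	}

	*starved = 0;
	return WRITE;
}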

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  8:28     ` Daniel Pittman
  2002-09-26  8:29       ` Jens Axboe
@ 2002-09-26 15:09       ` Rik van Riel
  1 sibling, 0 replies; 42+ messages in thread
From: Rik van Riel @ 2002-09-26 15:09 UTC (permalink / raw)
  To: Daniel Pittman; +Cc: Jens Axboe, Linux Kernel

On Thu, 26 Sep 2002, Daniel Pittman wrote:

> > read:write, ie no read preference. A silly value of 0 would give you
> > write preference, always.

> How much is it going to hurt a filesystem like ext[23] if that value is
> set to zero while doing large streaming writes -- something like
> (almost) uncompressed video at ten to twenty meg a second, for
> gigabytes?

It depends. If you've got 2 video streams to the same
filesystem and one needs to read a block bitmap in order
to allocate more disk blocks, you lose...

regards,

Rik
-- 
A: No.
Q: Should I include quotations after my reply?

http://www.surriel.com/		http://distro.conectiva.com/


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  6:59     ` Jens Axboe
  2002-09-26  7:06       ` William Lee Irwin III
@ 2002-09-26 15:54       ` Patrick Mansfield
  2002-09-30  8:15         ` Jens Axboe
  1 sibling, 1 reply; 42+ messages in thread
From: Patrick Mansfield @ 2002-09-26 15:54 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, Linux Kernel

On Thu, Sep 26, 2002 at 08:59:51AM +0200, Jens Axboe wrote:
> On Thu, Sep 26 2002, Jens Axboe wrote:
> BTW, for SCSI, it would be nice to first convert more drivers to use the
> block level queued tagging. That would provide us with a much better
> means to control starvation properly on SCSI as well.
> 
> -- 
> Jens Axboe

I haven't looked closely at the block tagging, but for the FCP protocol,
there are no tags, just the type of queueing to use (task attributes)
- like ordered, head of queue, untagged, and some others. The tagging
is normally done on the adapter itself (FCP-2 protocol, AFAIK). Does this
mean block level queued tagging can't help FCP?

Maybe the same applies to iSCSI, other protocols, and pseudo adapters -
USB, IDE, and RAID adapters.

-- Patrick Mansfield

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  8:15                   ` Michael Clark
  2002-09-26  8:18                     ` William Lee Irwin III
@ 2002-09-26 17:41                     ` Mike Anderson
  2002-09-26 18:03                       ` Jeff Garzik
  2002-09-26 20:21                     ` Thomas Tonino
  2 siblings, 1 reply; 42+ messages in thread
From: Mike Anderson @ 2002-09-26 17:41 UTC (permalink / raw)
  To: Michael Clark
  Cc: David S. Miller, jgarzik, wli, axboe, akpm, linux-kernel, patman

Michael Clark [michael@metaparadigm.com] wrote:
> The qlogic HBAs are a real problem in choosing which driver
> to use out of:
> 
> in kernel qlogicfc
> Qlogic's qla2x00 v4.x, v5.x, v6.x
> Matthew Jacob's isp_mod
> 

We have had good results using QLogic's driver. We are currently
running the v6.x version with failover turned off on 23xx cards. We have
also run a lot on > 4GB systems.

-andmike
--
Michael Anderson
andmike@us.ibm.com


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 17:41                     ` Mike Anderson
@ 2002-09-26 18:03                       ` Jeff Garzik
  2002-09-26 19:21                         ` Mike Anderson
  2002-09-26 22:41                         ` Matt Porter
  0 siblings, 2 replies; 42+ messages in thread
From: Jeff Garzik @ 2002-09-26 18:03 UTC (permalink / raw)
  To: Mike Anderson
  Cc: Michael Clark, David S. Miller, wli, axboe, akpm, linux-kernel, patman

Mike Anderson wrote:
> We have had good results using the Qlogic's driver. We are currently
> running the v6.x version with Failover tunred off on 23xx cards. We have
> run a lot on > 4GB systems also.


Has anybody put work into cleaning this driver up?

The word from kernel hackers who work on it is that they would rather write
a new driver than spend weeks cleaning it up :/


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 18:03                       ` Jeff Garzik
@ 2002-09-26 19:21                         ` Mike Anderson
  2002-09-27  5:41                           ` Andrew Vasquez
  2002-09-26 22:41                         ` Matt Porter
  1 sibling, 1 reply; 42+ messages in thread
From: Mike Anderson @ 2002-09-26 19:21 UTC (permalink / raw)
  To: Jeff Garzik
  Cc: Michael Clark, David S. Miller, wli, axboe, akpm, linux-kernel,
	patmans, andrew.vasquez

Jeff Garzik [jgarzik@pobox.com] wrote:
> Has anybody put work into cleaning this driver up?
> 
> The word from kernel hackers that work on it is, they would rather write 
> a new driver than spend weeks cleaning it up :/
> 

Andrew Vasquez from Qlogic can provide more detailed comments on deltas
between the versions of the driver.

The v6.x driver is cleaner and supports newer kernel interfaces than
past versions.


-andmike
--
Michael Anderson
andmike@us.ibm.com


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  8:15                   ` Michael Clark
  2002-09-26  8:18                     ` William Lee Irwin III
  2002-09-26 17:41                     ` Mike Anderson
@ 2002-09-26 20:21                     ` Thomas Tonino
  2 siblings, 0 replies; 42+ messages in thread
From: Thomas Tonino @ 2002-09-26 20:21 UTC (permalink / raw)
  To: linux-kernel

Michael Clark wrote:

> Although last time i tried Matt Jabob's driver, it locked up
> after 30 seconds of running bonnie. At least with Qlogic's
> driver I can run bonnie and cerberus continuously for 2 weeks
> with no problems (although this may have been because
> Matt's driver ignored the command queue throttle set in the
> qlogic cards BIOS).

My experience with a JBOD box is the in-kernel driver locking up with the "no
handle slots, this should not happen" message within half an hour of running a
4 MB/sec write load.

Then I tried the feral.com driver. That one was stable with the same load, and
I ran it for a month or two.

Then came along the highio patch in -AA, which made me want to switch to the
in-kernel qlogic driver again. This was a good time to try a patch by Andrew
Patterson, AFAIR upping the number of slots to 255 and fixing the calculations
around them. This has been running without problems for a few months now.

The patch has been posted to the list. It can be found at 
http://groups.google.com/groups?selm=linux.scsi.1019759258.2413.1.camel%40lvadp.fc.hp.com

> The qlogic HBAs are a real problem in choosing which driver
> to use out of:
> 
> in kernel qlogicfc
> Qlogic's qla2x00 v4.x, v5.x, v6.x
> Matthew Jacob's isp_mod

I never tried Qlogic's driver, probably because of all the versions floating around.


Thomas



^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 22:41                         ` Matt Porter
@ 2002-09-26 22:35                           ` Mark Bellon
  0 siblings, 0 replies; 42+ messages in thread
From: Mark Bellon @ 2002-09-26 22:35 UTC (permalink / raw)
  To: Matt Porter
  Cc: Jeff Garzik, Mike Anderson, Michael Clark, David S. Miller, wli,
	axboe, akpm, linux-kernel, patman

Matt Porter wrote:

>On Thu, Sep 26, 2002 at 02:03:19PM -0400, Jeff Garzik wrote:
>  
>
>>Mike Anderson wrote:
>>    
>>
>>>We have had good results using the Qlogic's driver. We are currently
>>>running the v6.x version with Failover tunred off on 23xx cards. We have
>>>run a lot on > 4GB systems also.
>>>      
>>>
>>Has anybody put work into cleaning this driver up?
>>
>>The word from kernel hackers that work on it is, they would rather write 
>>a new driver than spend weeks cleaning it up :/
>>    
>>
>
>I added Mark Bellon to this since he has spent a lot of time working
>with QLogic to get this cleaned up for the OSDL tree.  He can probably
>address some specific questions.
>
I fought with them for quite some time to get the major rewrite that
occurred between level 5 and level 6. The level 6 driver in TLT and OSDL
is a version that has many of my suggestions and a few enhancements in
it. It is "much better than a stick in the eye". We should now be in
sync with their releases. I haven't looked recently to see if there is
something newer than the one I checked in.

It still has a long way to go. I have threatened to rewrite it more than
once. However, there is a plan to get QLogic to do all of this and the
presentation keeps getting put off. It needs to be rewritten for the
"so called" hardened driver stuff, and that would be a good juncture to
make the rewrite happen.

Hacking the driver source is next to useless - QLogic releases from
their own tree constantly. They are OK about taking things back, but are
a bit slow.

I can help with any suggested changes and cleanups.

mark




^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 18:03                       ` Jeff Garzik
  2002-09-26 19:21                         ` Mike Anderson
@ 2002-09-26 22:41                         ` Matt Porter
  2002-09-26 22:35                           ` Mark Bellon
  1 sibling, 1 reply; 42+ messages in thread
From: Matt Porter @ 2002-09-26 22:41 UTC (permalink / raw)
  To: Jeff Garzik
  Cc: Mike Anderson, Michael Clark, David S. Miller, wli, axboe, akpm,
	linux-kernel, patman, mbellon

On Thu, Sep 26, 2002 at 02:03:19PM -0400, Jeff Garzik wrote:
> Mike Anderson wrote:
> > We have had good results using the Qlogic's driver. We are currently
> > running the v6.x version with Failover tunred off on 23xx cards. We have
> > run a lot on > 4GB systems also.
> 
> 
> Has anybody put work into cleaning this driver up?
> 
> The word from kernel hackers that work on it is, they would rather write 
> a new driver than spend weeks cleaning it up :/

I added Mark Bellon to this since he has spent a lot of time working
with QLogic to get this cleaned up for the OSDL tree.  He can probably
address some specific questions.

Regards,
-- 
Matt Porter
porter@cox.net
This is Linux Country. On a quiet night, you can hear Windows reboot.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26  8:29       ` Jens Axboe
@ 2002-09-26 23:23         ` Daniel Pittman
  2002-09-30  8:10           ` Jens Axboe
  0 siblings, 1 reply; 42+ messages in thread
From: Daniel Pittman @ 2002-09-26 23:23 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Linux Kernel

On Thu, 26 Sep 2002, Jens Axboe wrote:
> On Thu, Sep 26 2002, Daniel Pittman wrote:
>> On Thu, 26 Sep 2002, Jens Axboe wrote:
>> > On Wed, Sep 25 2002, Andrew Morton wrote:
>> 
>> [...]
>> 
>> > writes_starved. This controls how many times reads get preferred
>> > over writes. The default is 2, which means that we can serve two
>> > batches of reads over one write batch. A value of 4 would mean that
>> > reads could skip ahead of writes 4 times. A value of 1 would give
>> > you 1:1 read:write, ie no read preference. A silly value of 0 would
>> > give you write preference, always.
>> 
>> Actually, a value of zero doesn't sound completely silly to me, right
>> now, since I have been doing a lot of thinking about video capture
>> recently.
>> 
>> How much is it going to hurt a filesystem like ext[23] if that value
>> is set to zero while doing large streaming writes -- something like
>> (almost) uncompressed video at ten to twenty meg a second, for
>> gigabytes?
> 
> You are going to stalll all reads indefinately :-)

Which has some potentially fatal consequences, really, if any of the
capture code gets paged out before the streaming write starts, or if the
filesystem needs to read a bitmap block or so, as Rik points out.

>> This is a situation where, for a dedicated machine, delaying reads
>> almost forever is actually a valuable thing. At least, valuable until
>> it stops the writes from being able to proceed.
> 
> Well 0 should achieve that quite fine

Would you consider allowing something akin to 'writes_starved = -4' to
allow writes to bypass reads only 4 times -- a preference for writes,
but not forever?

That's going to express the bias I (think I) want for this case, but
it's not going to be able to stall a read forever...

     Daniel

-- 
It is quite humbling to realize that the storage occupied by the longest line
from a typical Usenet posting is sufficient to provide a state space so vast
that all the computation power in the world can not conquer it.
        -- Dave Wallace

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 19:21                         ` Mike Anderson
@ 2002-09-27  5:41                           ` Andrew Vasquez
  2002-09-27  5:57                             ` Jeff Garzik
  0 siblings, 1 reply; 42+ messages in thread
From: Andrew Vasquez @ 2002-09-27  5:41 UTC (permalink / raw)
  To: Mike Anderson
  Cc: Jeff Garzik, Michael Clark, David S. Miller, wli, axboe, akpm,
	linux-kernel, patmans, andrew.vasquez

[-- Attachment #1: Type: text/plain, Size: 3670 bytes --]

On Thu, 26 Sep 2002, Mike Anderson wrote:

> Jeff Garzik [jgarzik@pobox.com] wrote:
> > Has anybody put work into cleaning this driver up?
> > 
> > The word from kernel hackers that work on it is, they would rather write 
> > a new driver than spend weeks cleaning it up :/
> > 
> 
> Andrew Vasquez from Qlogic can provide more detailed comments on deltas
> between the versions of the driver.
> 
> The v6.x driver is cleaner and supporting newer kernel interfaces than
> past versions.
> 
All,

I believe we have made significant progress over the past few
months at delivering a reasonably stable and maintainable device
driver for the ISP2100/ISP22xx/ISP23xx chips.  Our original goals for the
6.x series driver included:

	o Stability
		- Failover
		- Fabric topologies
		- Kernel interface integrations

	o Maintainability
		- Code sanitization!
		- Strip dead-code and support for kernels < 2.4.

	o Feature integrations
		- Fast-path streamlining
		- RIO/ZIO
		- IP via FC
		- ISP2100 support 
		- ...

Note:
Most if not all of Arjan van de Ven's (Red Hat) changes that are in
later RH kernel errata releases (addon/qla2200) have made it into the
6.x series code.  Many thanks go out to Arjan for his work, not just
at the technical level, but also for the impact his work had on reshaping
the landscape of attitudes and direction within the Linux Driver Group
at QLogic.

The formal release of 6.01.00 has been completed and should be available
for download 'real soon now' (as it appears 6.01b5 is still the latest
6.x series driver available) -- package has been forwarded to the
website maintainer.  Notable changes from the 6.00 release include:

	o ISP2100 support

	o IP via FC support (RFC 2625)

	o General code-sanitizing
		- locking structures
		- queue structures
		- extraneous NOP*LOCK/UNLOCK macros
		- remove old EH routines
		- remove serial console routines

	o Bug-fixes.

Our current mode of operation for 6.x series work is: choose a driver
subsystem (i.e. command posting, fabric support, failover, or
post-command processing), rehash assumptions and requirements, review
its design and role in the driver, retool or reimplement, and test.  For
example, changes made to the source tip since 6.01 include:

	o A complete rewrite of qla2x00_64bit_start_scsi()
		- fix 4gb page boundary limitation
		- correct endianness during IOCB preparation
		- simplification

	o Additional 64bit DMA fixes

	o Additional 2.5 support fixes

	o More bug-fixes

There is still a lot of challenging work to be done, and perhaps now
would be a good time to ask the community what they need and would
like to see happen with the QLogic driver.  I'll start off with a brief
list of 'important' TODOs we've compiled:

	o Interrupt handler cleanup

	o ZIO/RIO patches for 23xx

	o Continue support for kernel 2.5 and above

	o Adding support for PCI-HOT plug
		- complete pci_driver interface

	o Fabric management
		- Use login-IOCBs instead of mailbox command
		- GNFT support

	o SNIA API support (version 2.0)

	o Complete command posting module.

	o Alternative persistent binding method (non modules.conf based)

	o Failover processing simplification

	o VI support

I hope this helps to clear up some of the haze and ambiguity
surrounding QLogic's work on the 6.x series driver, and perhaps,
at the same time, provides a medium for discussion regarding the 6.x
series driver.

-- 
Andrew Vasquez | praka@san.rr.com |
        I prefer an accomidating vice to an obstinate virtue
DSS: 0x508316BB, FP: 79BD 4FAC 7E82 FF70 6C2B  7E8B 168F 5529 5083 16BB

[-- Attachment #2: Type: application/pgp-signature, Size: 254 bytes --]

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-27  5:41                           ` Andrew Vasquez
@ 2002-09-27  5:57                             ` Jeff Garzik
  2002-09-27 16:58                               ` Mike Anderson
  0 siblings, 1 reply; 42+ messages in thread
From: Jeff Garzik @ 2002-09-27  5:57 UTC (permalink / raw)
  To: Andrew Vasquez
  Cc: Mike Anderson, Michael Clark, David S. Miller, wli, axboe, akpm,
	linux-kernel, patmans, andrew.vasquez

Andrew Vasquez wrote:
> I hope this helps to clearup some of the haze and ambiguity
> surrounding QLogic's work with the 6.x series driver, and perhaps
> at the same time, prepares a medium for discussion regarding the 6.x
> series driver.


Wow, thanks for all that information, and it's great that you've 
integrated Arjan's work and feedback.

There is one big question left unanswered...  Where can the source for 
the latest version with all this wonderful stuff be found?  :)  I don't 
see a URL even for 6.01b5.

	Jeff




^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-27  5:57                             ` Jeff Garzik
@ 2002-09-27 16:58                               ` Mike Anderson
  0 siblings, 0 replies; 42+ messages in thread
From: Mike Anderson @ 2002-09-27 16:58 UTC (permalink / raw)
  To: Jeff Garzik
  Cc: Andrew Vasquez, Michael Clark, David S. Miller, wli, axboe, akpm,
	linux-kernel, patmans, andrew.vasquez

Jeff Garzik [jgarzik@pobox.com] wrote:
> Wow, thanks for all that information, and it's great that you've 
> integrated Arjan's work and feedback.
> 
> There is one big question left unanswered...  Where can the source for 
> the latest version with all this wonderful stuff be found?  :)  I don't 
> see a URL even for 6.01b5.

In case you did not already get the URL:

	http://download.qlogic.com/drivers/5642/qla2x00-v6.1b5-dist.tgz

-andmike
--
Michael Anderson
andmike@us.ibm.com


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-25 17:20 [PATCH] deadline io scheduler Jens Axboe
  2002-09-26  6:15 ` Andrew Morton
@ 2002-09-30  7:45 ` Pavel Machek
  2002-10-02  5:35   ` Jens Axboe
  1 sibling, 1 reply; 42+ messages in thread
From: Pavel Machek @ 2002-09-30  7:45 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Linux Kernel

Hi!

> Due to recent "problems" (well the vm being just too damn good at keep
> disks busy these days), it's become even more apparent that our current
> io scheduler just cannot cope with some work loads. Repeated starvartion
> of reads is the most important one. The Andrew Morton Interactive
> Workload (AMIW) [1] rates the current kernel poorly, on my test machine
> it completes in 1-2 minutes depending on your luck. 2.5.38-BK does a lot
> better, but mainly because it's being extremely unfair. This deadline io
> scheduler finishes the AMIW in anywhere from ~0.5 seconds to ~3-4
> seconds, depending on the io load.

Would it be possible to make deadlines per-process, to introduce ionice?

ionice -n -5 mpg123 foo.mp3
ionice make

?								Pavel

-- 
Philips Velo 1: 1"x4"x8", 300gram, 60, 12MB, 40bogomips, linux, mutt,
details at http://atrey.karlin.mff.cuni.cz/~pavel/velo/index.html.


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 23:23         ` Daniel Pittman
@ 2002-09-30  8:10           ` Jens Axboe
  0 siblings, 0 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-30  8:10 UTC (permalink / raw)
  To: Daniel Pittman; +Cc: Linux Kernel

On Fri, Sep 27 2002, Daniel Pittman wrote:
> >> This is a situation where, for a dedicated machine, delaying reads
> >> almost forever is actually a valuable thing. At least, valuable until
> >> it stops the writes from being able to proceed.
> > 
> > Well 0 should achieve that quite fine
> 
> Would you consider allowing something akin to 'writes_starved = -4' to
> allow writes to bypass reads only 4 times -- a preference for writes,
> but not forever?

Sure yes, that would be an acceptable solution.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-26 15:54       ` Patrick Mansfield
@ 2002-09-30  8:15         ` Jens Axboe
  2002-09-30 15:39           ` Patrick Mansfield
  0 siblings, 1 reply; 42+ messages in thread
From: Jens Axboe @ 2002-09-30  8:15 UTC (permalink / raw)
  To: Andrew Morton, Linux Kernel

On Thu, Sep 26 2002, Patrick Mansfield wrote:
> On Thu, Sep 26, 2002 at 08:59:51AM +0200, Jens Axboe wrote:
> > On Thu, Sep 26 2002, Jens Axboe wrote:
> > BTW, for SCSI, it would be nice to first convert more drivers to use the
> > block level queued tagging. That would provide us with a much better
> > means to control starvation properly on SCSI as well.
> > 
> > -- 
> > Jens Axboe
> 
> I haven't look closely at the block tagging, but for the FCP protocol,
> there are no tags, just the type of queueing to use (task attributes)
> - like ordered, head of queue, untagged, and some others. The tagging
> is normally done on the adapter itself (FCP2 protocol AFAIK). Does this
> mean block level queued tagging can't help FCP?

The generic block level tagging is nothing more than tag management. It
can 'tag' a request (assigning it an integer tag), and later let you
locate that request by giving it the tag.

I suspect you need none of that for FCP. Instead it looks more like you
can set the task attributes based on the type of request itself. So you
would currently set 'ordered' for a request with REQ_BARRIER set. And
you could set 'head of queue' for REQ_URGENT (I'm making this one up
:-), etc.

Do you need any request management to deal with FCP queueing? It doesn't
sound like it.
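
For reference, the tag management described above boils down to something
like the following rough sketch. The blk_queue_*_tag helper names follow
the 2.5 block layer, but treat the exact signatures as approximate, and
hardware_queue_command() plus both example_* functions are invented here
purely for illustration:

/*
 * Sketch only: approximate use of the generic block layer tag helpers.
 * blk_queue_start_tag() assigns rq->tag (or fails if no tag is free),
 * blk_queue_find_tag() maps a completed tag back to its request, and
 * blk_queue_end_tag() releases the tag again.
 */
static void example_issue(request_queue_t *q, struct request *rq)
{
	if (blk_queue_start_tag(q, rq))
		return;			/* no free tag, leave rq queued */

	/* rq->tag now holds the integer tag the block layer assigned */
	hardware_queue_command(rq->tag, rq);
}

static void example_complete(request_queue_t *q, int tag)
{
	struct request *rq = blk_queue_find_tag(q, tag);

	if (rq)
		blk_queue_end_tag(q, rq);
	/* normal end-of-request handling of rq would follow here */
}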

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-30  8:15         ` Jens Axboe
@ 2002-09-30 15:39           ` Patrick Mansfield
  2002-09-30 16:08             ` Jens Axboe
  0 siblings, 1 reply; 42+ messages in thread
From: Patrick Mansfield @ 2002-09-30 15:39 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, Linux Kernel

On Mon, Sep 30, 2002 at 10:15:22AM +0200, Jens Axboe wrote:
> On Thu, Sep 26 2002, Patrick Mansfield wrote:

> > I haven't look closely at the block tagging, but for the FCP protocol,
> > there are no tags, just the type of queueing to use (task attributes)
> > - like ordered, head of queue, untagged, and some others. The tagging
> > is normally done on the adapter itself (FCP2 protocol AFAIK). Does this
> > mean block level queued tagging can't help FCP?
> 
> The generic block level tagging is nothing more than tag management. It
> can 'tag' a request (assigning it an integer tag), and later let you
> locate that request by giving it the tag.
> 
> I suspect you need none of that for FCP. Instead it looks more like you
> can set the task attributes based on the type of request itself. So you
> would currently set 'ordered' for a request with REQ_BARRIER set. And
> you could set 'head of queue' for REQ_URGENT (I'm making this one up
> :-), etc.
> 
> Do you need any request management to deal with FCP queueing? It doesn't
> sound like it.

No.

OK, I understand it now - if someone wants to put barrier support in an FCP
adapter driver, something like what we have in scsi_populate_tag_msg() would be
useful, as an inline or macro like:

static inline int scsi_is_ordered(Scsi_Cmnd *SCpnt)
{
	if (SCpnt->request->flags & REQ_BARRIER)
		return 1;
	else
		return 0;
}
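
Purely as an illustration of how such a helper might then be used, the
task attribute choice could be a one-liner; FCP_TASK_ORDERED and
FCP_TASK_SIMPLE below are placeholder names, not constants from any real
driver:

/* Illustration only: map the request type to an FCP task attribute.
 * FCP_TASK_ORDERED/FCP_TASK_SIMPLE are invented placeholder names. */
static inline int fcp_task_attribute(Scsi_Cmnd *SCpnt)
{
	return scsi_is_ordered(SCpnt) ? FCP_TASK_ORDERED : FCP_TASK_SIMPLE;
}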

-- Patrick Mansfield

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-30 15:39           ` Patrick Mansfield
@ 2002-09-30 16:08             ` Jens Axboe
  0 siblings, 0 replies; 42+ messages in thread
From: Jens Axboe @ 2002-09-30 16:08 UTC (permalink / raw)
  To: Patrick Mansfield; +Cc: Andrew Morton, Linux Kernel

On Mon, Sep 30 2002, Patrick Mansfield wrote:
> On Mon, Sep 30, 2002 at 10:15:22AM +0200, Jens Axboe wrote:
> > On Thu, Sep 26 2002, Patrick Mansfield wrote:
> 
> > > I haven't look closely at the block tagging, but for the FCP protocol,
> > > there are no tags, just the type of queueing to use (task attributes)
> > > - like ordered, head of queue, untagged, and some others. The tagging
> > > is normally done on the adapter itself (FCP2 protocol AFAIK). Does this
> > > mean block level queued tagging can't help FCP?
> > 
> > The generic block level tagging is nothing more than tag management. It
> > can 'tag' a request (assigning it an integer tag), and later let you
> > locate that request by giving it the tag.
> > 
> > I suspect you need none of that for FCP. Instead it looks more like you
> > can set the task attributes based on the type of request itself. So you
> > would currently set 'ordered' for a request with REQ_BARRIER set. And
> > you could set 'head of queue' for REQ_URGENT (I'm making this one up
> > :-), etc.
> > 
> > Do you need any request management to deal with FCP queueing? It doesn't
> > sound like it.
> 
> No.
> 
> OK I understand it now - if someone wants to put barrier support in an FCP
> adapter driver something like we have in scsi_populate_tag_msg() would be
> useful, an inline or macro like:
> 
> static inline int scsi_is_ordered(Scsi_Cmnd *SCpnt)
> {
> 	if (SCpnt->request->flags & REQ_BARRIER)
> 		return 1;
> 	else
> 		return 0;
> }

Exactly

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-30  7:45 ` Pavel Machek
@ 2002-10-02  5:35   ` Jens Axboe
  0 siblings, 0 replies; 42+ messages in thread
From: Jens Axboe @ 2002-10-02  5:35 UTC (permalink / raw)
  To: Pavel Machek; +Cc: Linux Kernel

On Mon, Sep 30 2002, Pavel Machek wrote:
> Hi!
> 
> > Due to recent "problems" (well the vm being just too damn good at keep
> > disks busy these days), it's become even more apparent that our current
> > io scheduler just cannot cope with some work loads. Repeated starvartion
> > of reads is the most important one. The Andrew Morton Interactive
> > Workload (AMIW) [1] rates the current kernel poorly, on my test machine
> > it completes in 1-2 minutes depending on your luck. 2.5.38-BK does a lot
> > better, but mainly because it's being extremely unfair. This deadline io
> > scheduler finishes the AMIW in anywhere from ~0.5 seconds to ~3-4
> > seconds, depending on the io load.
> 
> would it be possible to make deadlines per-process to introduce ionice?
> 
> ionice -n -5 mpg123 foo.mp3
> ionice make

Yes, it would be possible, and at least for reads it doesn't require too
many changes to the deadline scheduler. There's even someone working on
it; expect something to play with soon. It bases the io priority on the
process nice levels.
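
As a purely hypothetical sketch of what basing io priority on nice levels
could mean in a deadline-style scheduler (none of these names come from an
actual patch), one simple approach is to stretch a request's expire time
with the submitting task's nice value:

/*
 * Hypothetical sketch only.  A nicer (lower-priority) task gets a longer
 * read deadline: nice -20 keeps the base expire time, nice 19 roughly
 * doubles it.
 */
static unsigned long nice_scaled_expire(long nice, unsigned long base_expire)
{
	return base_expire + (base_expire * (nice + 20)) / 40;
}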

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH] deadline io scheduler
  2002-09-27 16:01 Andrew Vasquez
@ 2002-09-27 17:07 ` Mike Anderson
  0 siblings, 0 replies; 42+ messages in thread
From: Mike Anderson @ 2002-09-27 17:07 UTC (permalink / raw)
  To: Andrew Vasquez
  Cc: Jeff Garzik, Andrew Vasquez, Michael Clark, David S. Miller, wli,
	axboe, akpm, linux-kernel, patmans

Andrew Vasquez [andrew.vasquez@qlogic.com] wrote:
> In my mind, a larger question is determining a balance between the 
> 'Release Early, release often' mantra of Linux development and the 
> 'kinder, more conservative pace' of business.  For example, If we 
> cannot setup a 'patch/pre-beta' web-site locally at QLogic, I've 
> considered starting a SourceForge project or hosting it locally 
> through my ISP. 

Currently, the release method of not providing patches against older
releases of the driver, or even an archive of the full older releases, has
resulted in others duplicating this functionality themselves.

It would be great if you could set up a patch site.


-andmike
--
Michael Anderson
andmike@us.ibm.com


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [PATCH] deadline io scheduler
@ 2002-09-27 16:01 Andrew Vasquez
  2002-09-27 17:07 ` Mike Anderson
  0 siblings, 1 reply; 42+ messages in thread
From: Andrew Vasquez @ 2002-09-27 16:01 UTC (permalink / raw)
  To: Jeff Garzik, Andrew Vasquez
  Cc: Mike Anderson, Michael Clark, David S. Miller, wli, axboe, akpm,
	linux-kernel, patmans

> Andrew Vasquez wrote:
> > I hope this helps to clearup some of the haze and ambiguity
> > surrounding QLogic's work with the 6.x series driver, and perhaps
> > at the same time, prepares a medium for discussion regarding the 6.x
> > series driver.
> 
> Wow, thanks for all that information, and it's great that you've 
> integrated Arjan's work and feedback.
> 
> There is one big question left unanswered...  Where can the 
> source for 
> the latest version with all this wonderful stuff be found?  
> :)  I don't 
> see a URL even for 6.01b5.
> 
Sure, the 6.01b5 tarball can be found at:

	http://download.qlogic.com/drivers/5642/qla2x00-v6.1b5-dist.tgz

In general all QLogic drivers are available from the following URL:

	http://www.qlogic.com/support/drivers_software.asp

In my mind, a larger question is determining a balance between the
'release early, release often' mantra of Linux development and the
'kinder, more conservative pace' of business.  For example, if we
cannot set up a 'patch/pre-beta' web site locally at QLogic, I've
considered starting a SourceForge project or hosting it locally
through my ISP.

Regards,
Andrew Vasquez

^ permalink raw reply	[flat|nested] 42+ messages in thread

end of thread, other threads:[~2002-10-02  5:29 UTC | newest]

Thread overview: 42+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2002-09-25 17:20 [PATCH] deadline io scheduler Jens Axboe
2002-09-26  6:15 ` Andrew Morton
2002-09-26  6:27   ` David S. Miller
2002-09-26  6:44   ` Jens Axboe
2002-09-26  6:59     ` Jens Axboe
2002-09-26  7:06       ` William Lee Irwin III
2002-09-26  7:06         ` David S. Miller
2002-09-26  7:16           ` Jeff Garzik
2002-09-26  7:13             ` David S. Miller
2002-09-26  7:33               ` Jeff Garzik
2002-09-26  7:35                 ` David S. Miller
2002-09-26  8:15                   ` Michael Clark
2002-09-26  8:18                     ` William Lee Irwin III
2002-09-26 17:41                     ` Mike Anderson
2002-09-26 18:03                       ` Jeff Garzik
2002-09-26 19:21                         ` Mike Anderson
2002-09-27  5:41                           ` Andrew Vasquez
2002-09-27  5:57                             ` Jeff Garzik
2002-09-27 16:58                               ` Mike Anderson
2002-09-26 22:41                         ` Matt Porter
2002-09-26 22:35                           ` Mark Bellon
2002-09-26 20:21                     ` Thomas Tonino
2002-09-26  7:41                 ` Jeff Garzik
2002-09-26  7:23           ` William Lee Irwin III
2002-09-26  7:11         ` Jeff Garzik
2002-09-26  7:14           ` William Lee Irwin III
2002-09-26 15:54       ` Patrick Mansfield
2002-09-30  8:15         ` Jens Axboe
2002-09-30 15:39           ` Patrick Mansfield
2002-09-30 16:08             ` Jens Axboe
2002-09-26  8:28     ` Daniel Pittman
2002-09-26  8:29       ` Jens Axboe
2002-09-26 23:23         ` Daniel Pittman
2002-09-30  8:10           ` Jens Axboe
2002-09-26 15:09       ` Rik van Riel
2002-09-26  7:12   ` Andrew Morton
2002-09-26  7:17     ` Jens Axboe
2002-09-26  7:34     ` Jens Axboe
2002-09-30  7:45 ` Pavel Machek
2002-10-02  5:35   ` Jens Axboe
2002-09-27 16:01 Andrew Vasquez
2002-09-27 17:07 ` Mike Anderson
