From: Vivek Goyal <vgoyal@redhat.com>
To: linux-kernel@vger.kernel.org,
	containers@lists.linux-foundation.org, dm-devel@redhat.com,
	jens.axboe@oracle.com, nauman@google.com, dpshah@google.com,
	lizf@cn.fujitsu.com, mikew@google.com, fchecconi@gmail.com,
	paolo.valente@unimore.it, ryov@valinux.co.jp,
	fernando@oss.ntt.co.jp, s-uchida@ap.jp.nec.com,
	taka@valinux.co.jp, guijianfeng@cn.fujitsu.com,
	jmoyer@redhat.com, dhaval@linux.vnet.ibm.com,
	balbir@linux.vnet.ibm.com, righi.andrea@gmail.com,
	m-ikeda@ds.jp.nec.com, jbaron@redhat.com
Cc: agk@redhat.com, snitzer@redhat.com, vgoyal@redhat.com,
	akpm@linux-foundation.org, peterz@infradead.org
Subject: [PATCH 20/20] io-controller: experimental debug patch for async queue wait before expiry
Date: Tue, 26 May 2009 18:42:09 -0400
Message-ID: <1243377729-2176-21-git-send-email-vgoyal@redhat.com>
In-Reply-To: <1243377729-2176-1-git-send-email-vgoyal@redhat.com>

o A debug patch which waits for the next IO from an async queue once the
  queue becomes empty.

o For async writes, the traffic seen by the IO scheduler is not in proportion
  to the weight of the cgroup the task/page belongs to. So if there are two
  processes doing heavy writeouts in two cgroups with weights 1000 and 500
  respectively, the IO scheduler does not see more traffic/IO from the higher
  weight cgroup even if it tries to give that cgroup more disk time.
  Effectively, the async queue belonging to the higher weight cgroup becomes
  empty and drops out of contention for the disk, the lower weight cgroup gets
  to use the disk, and user space gets the impression that the higher weight
  cgroup did not get more disk time.

o This is more of a problem at the page cache level, where a higher weight
  process might end up writing out the pages of a lower weight process, and
  it should be fixed there.

o Until we fix those issues, this debug patch allows one to idle on an async
  queue (tunable via /sys/block/<disk>/queue/async_slice_idle) so that once a
  higher weight queue becomes empty, instead of expiring it we wait for the
  next request to come from that queue, hence giving it more disk time. A
  higher value of async_slice_idle, around 300ms, helps me get reasonable
  numbers for my setup (see the example below). Note: more disk time does not
  necessarily translate into more IO done, as the higher weight group is not
  pushing enough IO to the io scheduler. This is just a debugging aid to
  demonstrate the correctness of the IO controller by giving more disk time
  to the higher weight cgroup.
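
o As an illustration only (not part of this patch), here is a minimal
  user-space sketch that sets the tunable; the disk name "sdb" and the 300ms
  value are assumptions for the example, and a plain shell redirect to the
  sysfs file would work equally well:

	#include <stdio.h>

	int main(void)
	{
		/* value is in milliseconds; 0 disables async idling */
		FILE *f = fopen("/sys/block/sdb/queue/async_slice_idle", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "300\n");
		fclose(f);
		return 0;
	}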

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 block/blk-sysfs.c   |    7 +++++
 block/elevator-fq.c |   67 +++++++++++++++++++++++++++++++++++++++++++++++---
 block/elevator-fq.h |    7 +++++
 3 files changed, 77 insertions(+), 4 deletions(-)

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b60b76e..f245f33 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -314,6 +314,12 @@ static struct queue_sysfs_entry queue_slice_idle_entry = {
 	.store = elv_slice_idle_store,
 };
 
+static struct queue_sysfs_entry queue_async_slice_idle_entry = {
+	.attr = {.name = "async_slice_idle", .mode = S_IRUGO | S_IWUSR },
+	.show = elv_async_slice_idle_show,
+	.store = elv_async_slice_idle_store,
+};
+
 static struct queue_sysfs_entry queue_slice_sync_entry = {
 	.attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_slice_sync_show,
@@ -349,6 +355,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iostats_entry.attr,
 #ifdef CONFIG_ELV_FAIR_QUEUING
 	&queue_slice_idle_entry.attr,
+	&queue_async_slice_idle_entry.attr,
 	&queue_slice_sync_entry.attr,
 	&queue_slice_async_entry.attr,
 	&queue_fairness_entry.attr,
diff --git a/block/elevator-fq.c b/block/elevator-fq.c
index 6dd8683..f1179aa 100644
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -20,6 +20,7 @@ const int elv_slice_sync = HZ / 10;
 int elv_slice_async = HZ / 25;
 const int elv_slice_async_rq = 2;
 int elv_slice_idle = HZ / 125;
+int elv_async_slice_idle = 0;
 static struct kmem_cache *elv_ioq_pool;
 
 /* Maximum Window length for updating average disk rate */
@@ -2677,6 +2678,46 @@ ssize_t elv_slice_idle_store(struct request_queue *q, const char *name,
 	return count;
 }
 
+/* Functions to show and store elv_async_slice_idle value through sysfs */
+ssize_t elv_async_slice_idle_show(struct request_queue *q, char *name)
+{
+	struct elv_fq_data *efqd;
+	unsigned int data;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	efqd = &q->elevator->efqd;
+	data = jiffies_to_msecs(efqd->elv_async_slice_idle);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+	return sprintf(name, "%d\n", data);
+}
+
+ssize_t elv_async_slice_idle_store(struct request_queue *q, const char *name,
+			  size_t count)
+{
+	struct elv_fq_data *efqd;
+	unsigned int data;
+	unsigned long flags;
+
+	char *p = (char *)name;
+
+	data = simple_strtoul(p, &p, 10);
+
+	if (data < 0)
+		data = 0;
+	else if (data > INT_MAX)
+		data = INT_MAX;
+
+	data = msecs_to_jiffies(data);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	efqd = &q->elevator->efqd;
+	efqd->elv_async_slice_idle = data;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return count;
+}
+
 /* Functions to show and store elv_slice_sync value through sysfs */
 ssize_t elv_slice_sync_show(struct request_queue *q, char *name)
 {
@@ -2930,8 +2971,10 @@ int elv_init_ioq(struct elevator_queue *eq, struct io_queue *ioq,
 		ioq->pid = current->pid;
 
 	ioq->sched_queue = sched_queue;
-	if (elv_gen_idling_enabled(eq) && is_sync && !elv_ioq_class_idle(ioq))
-		elv_mark_ioq_idle_window(ioq);
+	if (elv_gen_idling_enabled(eq) && !elv_ioq_class_idle(ioq)) {
+		if (is_sync || efqd->fairness)
+			elv_mark_ioq_idle_window(ioq);
+	}
 	bfq_init_entity(&ioq->entity, iog);
 	ioq->entity.budget = elv_prio_to_slice(efqd, ioq);
 	return 0;
@@ -3563,7 +3606,12 @@ void elv_ioq_arm_slice_timer(struct request_queue *q, int wait_for_busy)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!efqd->elv_slice_idle || !elv_ioq_idle_window(ioq))
+	if ((elv_ioq_sync(ioq) && !efqd->elv_slice_idle) ||
+			!elv_ioq_idle_window(ioq))
+		return;
+
+	/* If this is async queue and async_slice_idle is disabled, return */
+	if (!elv_ioq_sync(ioq) && !efqd->elv_async_slice_idle)
 		return;
 
 	/*
@@ -3572,7 +3620,10 @@ void elv_ioq_arm_slice_timer(struct request_queue *q, int wait_for_busy)
 	 */
 	if (wait_for_busy) {
 		elv_mark_ioq_wait_busy(ioq);
-		sl = efqd->elv_slice_idle;
+		if (elv_ioq_sync(ioq))
+			sl = efqd->elv_slice_idle;
+		else
+			sl = efqd->elv_async_slice_idle;
 		mod_timer(&efqd->idle_slice_timer, jiffies + sl);
 		elv_log_ioq(efqd, ioq, "arm idle: %lu wait busy=1", sl);
 		return;
@@ -3913,6 +3964,13 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
 			goto done;
 		}
 
+		/* For async queue try to do wait busy */
+		if (efqd->fairness && !elv_ioq_sync(ioq) && !ioq->nr_queued
+		    && (elv_iog_nr_active(iog) <= 1)) {
+			elv_ioq_arm_slice_timer(q, 1);
+			goto done;
+		}
+
 		/*
 		 * If there are no requests waiting in this queue, and
 		 * there are other queues ready to issue requests, AND
@@ -4042,6 +4100,7 @@ int elv_init_fq_data(struct request_queue *q, struct elevator_queue *e)
 	efqd->elv_slice[0] = elv_slice_async;
 	efqd->elv_slice[1] = elv_slice_sync;
 	efqd->elv_slice_idle = elv_slice_idle;
+	efqd->elv_async_slice_idle = elv_async_slice_idle;
 	efqd->hw_tag = 1;
 
 	/* For the time being keep fairness enabled by default */
diff --git a/block/elevator-fq.h b/block/elevator-fq.h
index de0f6b0..1c99e83 100644
--- a/block/elevator-fq.h
+++ b/block/elevator-fq.h
@@ -339,6 +339,8 @@ struct elv_fq_data {
 	 * users of this functionality.
 	 */
 	unsigned int elv_slice_idle;
+	/* idle slice for async queue */
+	unsigned int elv_async_slice_idle;
 	struct timer_list idle_slice_timer;
 	struct work_struct unplug_work;
 
@@ -665,6 +667,11 @@ extern ssize_t elv_slice_idle_store(struct request_queue *q, const char *name,
 extern ssize_t elv_slice_sync_show(struct request_queue *q, char *name);
 extern ssize_t elv_slice_sync_store(struct request_queue *q, const char *name,
 						size_t count);
+
+extern ssize_t elv_async_slice_idle_show(struct request_queue *q, char *name);
+extern ssize_t elv_async_slice_idle_store(struct request_queue *q,
+					const char *name, size_t count);
+
 extern ssize_t elv_slice_async_show(struct request_queue *q, char *name);
 extern ssize_t elv_slice_async_store(struct request_queue *q, const char *name,
 						size_t count);
-- 
1.6.0.1

