All of lore.kernel.org
 help / color / mirror / Atom feed
From: Vivek Goyal <vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
To: linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	containers-cunTk1MwBs9QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org,
	dm-devel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org,
	jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org,
	nauman-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
	dpshah-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
	lizf-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org
Cc: akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org,
	snitzer-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org,
	agk-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org
Subject: [PATCH 12/25] io-controller: idle for sometime on sync queue before expiring it
Date: Thu,  2 Jul 2009 16:01:44 -0400	[thread overview]
Message-ID: <1246564917-19603-13-git-send-email-vgoyal__18765.2340079241$1246567800$gmane$org@redhat.com> (raw)
In-Reply-To: <1246564917-19603-1-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>

o When a sync queue expires, in many cases it might be empty and then
  it will be deleted from the active tree. This will lead to a scenario
  where out of two competing queues, only one is on the tree and when a
  new queue is selected, vtime jump takes place and we don't see services
  provided in proportion to weight.

o In general this is a fundamental problem with fairness of sync queues
  where queues are not continuously backlogged. Looks like idling is
  the only solution to make sure such kind of queues can get some decent
  amount of disk bandwidth in the face of competition from continuously
  backlogged queues. But excessive idling has potential to reduce performance
  on SSD and disks with command queuing.

o This patch experiments with waiting for next request to come before a
  queue is expired after it has consumed its time slice. This can ensure
  more accurate fairness numbers in some cases.

o Introduced a tunable "fairness". If set, io-controller will put more
  focus on getting fairness right than getting throughput right.

o When writes are being done on a file opened with O_SYNC, ioscheduler sees
  synchronous write requests with noidle flag set. But the fact is we are
  seeing a continuous stream of writes within 1ms or so. Hence it makes sense
  to wait on these writes. For the time being to achieve fairness for O_SYNC
  writes, continue to idle even if last request was sync write and noidle
  flag was set. (Only done if "fairness" is set). Probably right fix is to
  make sure in O_SYNC path, requests are not marked with noidle flag.

Signed-off-by: Vivek Goyal <vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
---
 block/cfq-iosched.c |    1 +
 block/elevator-fq.c |  133 ++++++++++++++++++++++++++++++++++++++++++++-------
 block/elevator-fq.h |   15 ++++++
 3 files changed, 132 insertions(+), 17 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 02b5cd5..98a35fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2004,6 +2004,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	ELV_ATTR(slice_idle),
 	ELV_ATTR(slice_sync),
 	ELV_ATTR(slice_async),
+	ELV_ATTR(fairness),
 	__ATTR_NULL
 };
 
diff --git a/block/elevator-fq.c b/block/elevator-fq.c
index bab01b5..68be1dc 100644
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -424,6 +424,7 @@ static void bfq_active_insert(struct io_service_tree *st,
 	struct rb_node *node = &entity->rb_node;
 
 	bfq_insert(&st->active, entity);
+	entity->sched_data->nr_active++;
 
 	if (node->rb_left != NULL)
 		node = node->rb_left;
@@ -483,6 +484,7 @@ static void bfq_active_remove(struct io_service_tree *st,
 
 	node = bfq_find_deepest(&entity->rb_node);
 	bfq_remove(&st->active, entity);
+	entity->sched_data->nr_active--;
 
 	if (node != NULL)
 		bfq_update_active_tree(node);
@@ -569,6 +571,21 @@ static void bfq_forget_idle(struct io_service_tree *st)
 		bfq_put_idle_entity(st, first_idle);
 }
 
+/*
+ * Returns the number of active entities a particular io group has. This
+ * includes number of active entities on service tree as well as the active
+ * entity which is being served currently, if any.
+ */
+
+static inline int elv_iog_nr_active(struct io_group *iog)
+{
+	struct io_sched_data *sd = &iog->sched_data;
+
+	if (sd->active_entity)
+		return sd->nr_active + 1;
+	else
+		return sd->nr_active;
+}
 
 static struct io_service_tree *
 __bfq_entity_update_prio(struct io_service_tree *old_st,
@@ -1995,6 +2012,8 @@ SHOW_FUNCTION(elv_slice_sync_show, efqd->elv_slice[1], 1);
 EXPORT_SYMBOL(elv_slice_sync_show);
 SHOW_FUNCTION(elv_slice_async_show, efqd->elv_slice[0], 1);
 EXPORT_SYMBOL(elv_slice_async_show);
+SHOW_FUNCTION(elv_fairness_show, efqd->fairness, 0);
+EXPORT_SYMBOL(elv_fairness_show);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -2019,6 +2038,8 @@ STORE_FUNCTION(elv_slice_sync_store, &efqd->elv_slice[1], 1, UINT_MAX, 1);
 EXPORT_SYMBOL(elv_slice_sync_store);
 STORE_FUNCTION(elv_slice_async_store, &efqd->elv_slice[0], 1, UINT_MAX, 1);
 EXPORT_SYMBOL(elv_slice_async_store);
+STORE_FUNCTION(elv_fairness_store, &efqd->fairness, 0, 1, 0);
+EXPORT_SYMBOL(elv_fairness_store);
 #undef STORE_FUNCTION
 
 void elv_schedule_dispatch(struct request_queue *q)
@@ -2142,7 +2163,7 @@ static void elv_ioq_update_idle_window(struct elevator_queue *eq,
 	 * io scheduler if it wants to disable idling based on additional
 	 * considrations like seek pattern.
 	 */
-	if (enable_idle) {
+	if (enable_idle && !efqd->fairness) {
 		if (eq->ops->elevator_update_idle_window_fn)
 			enable_idle = eq->ops->elevator_update_idle_window_fn(
 						eq, ioq->sched_queue, rq);
@@ -2328,6 +2349,7 @@ static void __elv_set_active_ioq(struct elv_fq_data *efqd, struct io_queue *ioq,
 
 		elv_clear_ioq_wait_request(ioq);
 		elv_clear_ioq_must_dispatch(ioq);
+		elv_clear_ioq_wait_busy_done(ioq);
 		elv_mark_ioq_slice_new(ioq);
 
 		del_timer(&efqd->idle_slice_timer);
@@ -2483,10 +2505,12 @@ void __elv_ioq_slice_expired(struct request_queue *q, struct io_queue *ioq)
 	assert_spin_locked(q->queue_lock);
 	elv_log_ioq(efqd, ioq, "slice expired");
 
-	if (elv_ioq_wait_request(ioq))
+	if (elv_ioq_wait_request(ioq) || elv_ioq_wait_busy(ioq))
 		del_timer(&efqd->idle_slice_timer);
 
 	elv_clear_ioq_wait_request(ioq);
+	elv_clear_ioq_wait_busy(ioq);
+	elv_clear_ioq_wait_busy_done(ioq);
 
 	/*
 	 * if ioq->slice_end = 0, that means a queue was expired before first
@@ -2659,7 +2683,7 @@ void elv_ioq_request_add(struct request_queue *q, struct request *rq)
 		 * has other work pending, don't risk delaying until the
 		 * idle timer unplug to continue working.
 		 */
-		if (elv_ioq_wait_request(ioq)) {
+		if (elv_ioq_wait_request(ioq) && !elv_ioq_wait_busy(ioq)) {
 			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
 			    efqd->busy_queues > 1) {
 				del_timer(&efqd->idle_slice_timer);
@@ -2667,6 +2691,18 @@ void elv_ioq_request_add(struct request_queue *q, struct request *rq)
 			}
 			elv_mark_ioq_must_dispatch(ioq);
 		}
+
+		/*
+		 * If we were waiting for a request on this queue, wait is
+		 * done. Schedule the next dispatch
+		 */
+		if (elv_ioq_wait_busy(ioq)) {
+			del_timer(&efqd->idle_slice_timer);
+			elv_clear_ioq_wait_busy(ioq);
+			elv_mark_ioq_wait_busy_done(ioq);
+			elv_clear_ioq_must_dispatch(ioq);
+			elv_schedule_dispatch(q);
+		}
 	} else if (elv_should_preempt(q, ioq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
@@ -2694,6 +2730,9 @@ static void elv_idle_slice_timer(unsigned long data)
 
 	if (ioq) {
 
+		if (elv_ioq_wait_busy(ioq))
+			goto expire;
+
 		/*
 		 * We saw a request before the queue expired, let it through
 		 */
@@ -2727,7 +2766,7 @@ out_cont:
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void elv_ioq_arm_slice_timer(struct request_queue *q)
+static void elv_ioq_arm_slice_timer(struct request_queue *q, int wait_for_busy)
 {
 	struct elv_fq_data *efqd = &q->elevator->efqd;
 	struct io_queue *ioq = elv_active_ioq(q->elevator);
@@ -2740,26 +2779,38 @@ static void elv_ioq_arm_slice_timer(struct request_queue *q)
 	 * for devices that support queuing, otherwise we still have a problem
 	 * with sync vs async workloads.
 	 */
-	if (blk_queue_nonrot(q) && efqd->hw_tag)
+	if (blk_queue_nonrot(q) && efqd->hw_tag && !efqd->fairness)
 		return;
 
 	/*
-	 * still requests with the driver, don't idle
+	 * idle is disabled, either manually or by past process history
 	 */
-	if (efqd->rq_in_driver)
+	if (!efqd->elv_slice_idle || !elv_ioq_idle_window(ioq))
 		return;
 
 	/*
-	 * idle is disabled, either manually or by past process history
+	 * This queue has consumed its time slice. We are waiting only for
+	 * it to become busy before we select next queue for dispatch.
 	 */
-	if (!efqd->elv_slice_idle || !elv_ioq_idle_window(ioq))
+	if (wait_for_busy) {
+		elv_mark_ioq_wait_busy(ioq);
+		sl = efqd->elv_slice_idle;
+		mod_timer(&efqd->idle_slice_timer, jiffies + sl);
+		elv_log_ioq(efqd, ioq, "arm idle: %lu wait busy=1", sl);
+		return;
+	}
+
+	/*
+	 * still requests with the driver, don't idle
+	 */
+	if (efqd->rq_in_driver && !efqd->fairness)
 		return;
 
 	/*
 	 * may be iosched got its own idling logic. In that case io
 	 * schduler will take care of arming the timer, if need be.
 	 */
-	if (q->elevator->ops->elevator_arm_slice_timer_fn) {
+	if (q->elevator->ops->elevator_arm_slice_timer_fn && !efqd->fairness) {
 		q->elevator->ops->elevator_arm_slice_timer_fn(q,
 						ioq->sched_queue);
 	} else {
@@ -2822,11 +2873,38 @@ void *elv_fq_select_ioq(struct request_queue *q, int force)
 			goto expire;
 	}
 
+	/* We are waiting for this queue to become busy before it expires.*/
+	if (efqd->fairness && elv_ioq_wait_busy(ioq)) {
+		ioq = NULL;
+		goto keep_queue;
+	}
+
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (elv_ioq_slice_used(ioq) && !elv_ioq_must_dispatch(ioq))
-		goto expire;
+	if (elv_ioq_slice_used(ioq) && !elv_ioq_must_dispatch(ioq)) {
+		/*
+		 * Queue has used up its slice. Wait busy is not on otherwise
+		 * we wouldn't have been here. There is a chance that after
+		 * slice expiry no request from the queue completed hence
+		 * wait busy timer could not be turned on. If that's the case
+		 * don't expire the queue yet. Next request completion from
+		 * the queue will arm the wait busy timer.
+		 *
+		 * Don't wait if this group has other active queues. This
+		 * will make sure that we don't loose fairness at group level
+		 * at the same time in root group we will not see cfq
+		 * regressions.
+		 */
+		if (elv_ioq_sync(ioq) && !ioq->nr_queued
+		    && elv_ioq_nr_dispatched(ioq)
+		    && (elv_iog_nr_active(ioq_to_io_group(ioq)) <= 1)
+		    && !elv_ioq_wait_busy_done(ioq)) {
+			ioq = NULL;
+			goto keep_queue;
+		} else
+			goto expire;
+	}
 
 	/*
 	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
@@ -2977,11 +3055,13 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
 	const int sync = rq_is_sync(rq);
 	struct io_queue *ioq;
 	struct elv_fq_data *efqd = &q->elevator->efqd;
+	struct io_group *iog;
 
 	if (!elv_iosched_fair_queuing_enabled(q->elevator))
 		return;
 
 	ioq = rq->ioq;
+	iog = ioq_to_io_group(ioq);
 
 	elv_log_ioq(efqd, ioq, "complete");
 
@@ -3007,6 +3087,12 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
 			elv_ioq_set_prio_slice(q, ioq);
 			elv_clear_ioq_slice_new(ioq);
 		}
+
+		if (elv_ioq_class_idle(ioq)) {
+			elv_ioq_slice_expired(q);
+			goto done;
+		}
+
 		/*
 		 * If there are no requests waiting in this queue, and
 		 * there are other queues ready to issue requests, AND
@@ -3014,13 +3100,24 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
 		 * mean seek distance, give them a chance to run instead
 		 * of idling.
 		 */
-		if (elv_ioq_slice_used(ioq) || elv_ioq_class_idle(ioq))
-			elv_ioq_slice_expired(q);
-		else if (!ioq->nr_queued && !elv_close_cooperator(q, ioq, 1)
-			 && sync && !rq_noidle(rq))
-			elv_ioq_arm_slice_timer(q);
+		if (elv_ioq_slice_used(ioq)) {
+			if (sync && !ioq->nr_queued
+			    && (elv_iog_nr_active(iog) <= 1)) {
+				/*
+				 * Idle for one extra period in hierarchical
+				 * setup
+				 */
+				elv_ioq_arm_slice_timer(q, 1);
+			} else {
+				/* Expire the queue */
+				elv_ioq_slice_expired(q);
+			}
+		} else if (!ioq->nr_queued && !elv_close_cooperator(q, ioq, 1)
+			 && sync && (!rq_noidle(rq) || efqd->fairness))
+			elv_ioq_arm_slice_timer(q, 0);
 	}
 
+done:
 	if (!efqd->rq_in_driver)
 		elv_schedule_dispatch(q);
 }
@@ -3125,6 +3222,8 @@ int elv_init_fq_data(struct request_queue *q, struct elevator_queue *e)
 	efqd->elv_slice_idle = elv_slice_idle;
 	efqd->hw_tag = 1;
 
+	/* For the time being keep fairness enabled by default */
+	efqd->fairness = 1;
 	return 0;
 }
 
diff --git a/block/elevator-fq.h b/block/elevator-fq.h
index d76bd96..a414309 100644
--- a/block/elevator-fq.h
+++ b/block/elevator-fq.h
@@ -75,6 +75,7 @@ struct io_service_tree {
 struct io_sched_data {
 	struct io_entity *active_entity;
 	struct io_entity *next_active;
+	int nr_active;
 	struct io_service_tree service_tree[IO_IOPRIO_CLASSES];
 };
 
@@ -337,6 +338,13 @@ struct elv_fq_data {
 	unsigned long long rate_sampling_start; /*sampling window start jifies*/
 	/* number of sectors finished io during current sampling window */
 	unsigned long rate_sectors_current;
+
+	/*
+	 * If set to 1, will disable many optimizations done for boost
+	 * throughput and focus more on providing fairness for sync
+	 * queues.
+	 */
+	unsigned int fairness;
 };
 
 /* Logging facilities. */
@@ -358,6 +366,8 @@ enum elv_queue_state_flags {
 	ELV_QUEUE_FLAG_wait_request,	  /* waiting for a request */
 	ELV_QUEUE_FLAG_must_dispatch,	  /* must be allowed a dispatch */
 	ELV_QUEUE_FLAG_slice_new,	  /* no requests dispatched in slice */
+	ELV_QUEUE_FLAG_wait_busy,	  /* wait for this queue to get busy */
+	ELV_QUEUE_FLAG_wait_busy_done,	  /* Have already waited on this queue*/
 };
 
 #define ELV_IO_QUEUE_FLAG_FNS(name)					\
@@ -380,6 +390,8 @@ ELV_IO_QUEUE_FLAG_FNS(wait_request)
 ELV_IO_QUEUE_FLAG_FNS(must_dispatch)
 ELV_IO_QUEUE_FLAG_FNS(idle_window)
 ELV_IO_QUEUE_FLAG_FNS(slice_new)
+ELV_IO_QUEUE_FLAG_FNS(wait_busy)
+ELV_IO_QUEUE_FLAG_FNS(wait_busy_done)
 
 static inline struct io_service_tree *
 io_entity_service_tree(struct io_entity *entity)
@@ -532,6 +544,9 @@ extern ssize_t elv_slice_sync_store(struct elevator_queue *q, const char *name,
 extern ssize_t elv_slice_async_show(struct elevator_queue *q, char *name);
 extern ssize_t elv_slice_async_store(struct elevator_queue *q, const char *name,
 						size_t count);
+extern ssize_t elv_fairness_show(struct elevator_queue *q, char *name);
+extern ssize_t elv_fairness_store(struct elevator_queue *q, const char *name,
+						size_t count);
 
 /* Functions used by elevator.c */
 extern int elv_init_fq_data(struct request_queue *q, struct elevator_queue *e);
-- 
1.6.0.6

  parent reply	other threads:[~2009-07-02 20:01 UTC|newest]

Thread overview: 191+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-07-02 20:01 [RFC] IO scheduler based IO controller V6 Vivek Goyal
2009-07-02 20:01 ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 01/25] io-controller: Documentation Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 02/25] io-controller: Core of the B-WF2Q+ scheduler Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 03/25] io-controller: bfq support of in-class preemption Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 04/25] io-controller: Common flat fair queuing code in elevaotor layer Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 05/25] io-controller: Charge for time slice based on average disk rate Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 06/25] io-controller: Modify cfq to make use of flat elevator fair queuing Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 07/25] io-controller: core bfq scheduler changes for hierarchical setup Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 08/25] io-controller: cgroup related changes for hierarchical group support Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 09/25] io-controller: Common hierarchical fair queuing code in elevaotor layer Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
     [not found]   ` <1246564917-19603-10-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-06  2:46     ` Gui Jianfeng
2009-07-06  2:46   ` Gui Jianfeng
2009-07-06  2:46     ` Gui Jianfeng
2009-07-06 14:16     ` Vivek Goyal
2009-07-06 14:16       ` Vivek Goyal
     [not found]       ` <20090706141650.GD8279-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-07  1:40         ` [PATCH] io-controller: Get rid of css id from io cgroup Gui Jianfeng
2009-07-07  1:40           ` Gui Jianfeng
2009-07-08 14:04           ` Vivek Goyal
2009-07-08 14:04             ` Vivek Goyal
     [not found]           ` <4A52A77E.8050203-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-08 14:04             ` Vivek Goyal
     [not found]     ` <4A51657B.7000008-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-06 14:16       ` [PATCH 09/25] io-controller: Common hierarchical fair queuing code in elevaotor layer Vivek Goyal
2009-07-02 20:01 ` [PATCH 10/25] io-controller: cfq changes to use " Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 11/25] io-controller: Export disk time used and nr sectors dipatched through cgroups Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-08  2:16   ` Gui Jianfeng
2009-07-08  2:16     ` Gui Jianfeng
2009-07-08 14:00     ` Vivek Goyal
2009-07-08 14:00       ` Vivek Goyal
     [not found]     ` <4A54018C.5090804-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-08 14:00       ` Vivek Goyal
     [not found]   ` <1246564917-19603-12-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-08  2:16     ` Gui Jianfeng
2009-07-02 20:01 ` [PATCH 12/25] io-controller: idle for sometime on sync queue before expiring it Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
     [not found] ` <1246564917-19603-1-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-02 20:01   ` [PATCH 01/25] io-controller: Documentation Vivek Goyal
2009-07-02 20:01   ` [PATCH 02/25] io-controller: Core of the B-WF2Q+ scheduler Vivek Goyal
2009-07-02 20:01   ` [PATCH 03/25] io-controller: bfq support of in-class preemption Vivek Goyal
2009-07-02 20:01   ` [PATCH 04/25] io-controller: Common flat fair queuing code in elevaotor layer Vivek Goyal
2009-07-02 20:01   ` [PATCH 05/25] io-controller: Charge for time slice based on average disk rate Vivek Goyal
2009-07-02 20:01   ` [PATCH 06/25] io-controller: Modify cfq to make use of flat elevator fair queuing Vivek Goyal
2009-07-02 20:01   ` [PATCH 07/25] io-controller: core bfq scheduler changes for hierarchical setup Vivek Goyal
2009-07-02 20:01   ` [PATCH 08/25] io-controller: cgroup related changes for hierarchical group support Vivek Goyal
2009-07-02 20:01   ` [PATCH 09/25] io-controller: Common hierarchical fair queuing code in elevaotor layer Vivek Goyal
2009-07-02 20:01   ` [PATCH 10/25] io-controller: cfq changes to use " Vivek Goyal
2009-07-02 20:01   ` [PATCH 11/25] io-controller: Export disk time used and nr sectors dipatched through cgroups Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal [this message]
2009-07-02 20:01   ` [PATCH 13/25] io-controller: Wait for requests to complete from last queue before new queue is scheduled Vivek Goyal
2009-07-02 20:01   ` [PATCH 14/25] io-controller: Separate out queue and data Vivek Goyal
2009-07-02 20:01   ` [PATCH 15/25] io-conroller: Prepare elevator layer for single queue schedulers Vivek Goyal
2009-07-02 20:01   ` [PATCH 16/25] io-controller: noop changes for hierarchical fair queuing Vivek Goyal
2009-07-02 20:01   ` [PATCH 17/25] io-controller: deadline " Vivek Goyal
2009-07-02 20:01   ` [PATCH 18/25] io-controller: anticipatory " Vivek Goyal
2009-07-02 20:01   ` [PATCH 19/25] blkio_cgroup patches from Ryo to track async bios Vivek Goyal
2009-07-02 20:01   ` [PATCH 20/25] io-controller: map async requests to appropriate cgroup Vivek Goyal
2009-07-02 20:01   ` [PATCH 21/25] io-controller: Per cgroup request descriptor support Vivek Goyal
2009-07-02 20:01   ` [PATCH 22/25] io-controller: Per io group bdi congestion interface Vivek Goyal
2009-07-02 20:01   ` [PATCH 23/25] io-controller: Support per cgroup per device weights and io class Vivek Goyal
2009-07-02 20:01   ` [PATCH 24/25] io-controller: Debug hierarchical IO scheduling Vivek Goyal
2009-07-02 20:01   ` [PATCH 25/25] io-controller: experimental debug patch for async queue wait before expiry Vivek Goyal
2009-07-08  3:56   ` [RFC] IO scheduler based IO controller V6 Balbir Singh
2009-07-10  1:56   ` [PATCH] io-controller: implement per group request allocation limitation Gui Jianfeng
2009-07-27  2:10   ` [RFC] IO scheduler based IO controller V6 Gui Jianfeng
2009-07-02 20:01 ` [PATCH 13/25] io-controller: Wait for requests to complete from last queue before new queue is scheduled Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:09   ` Nauman Rafique
2009-07-02 20:09     ` Nauman Rafique
     [not found]     ` <e98e18940907021309u1f784b3at409b55ba46ed108c-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2009-07-02 20:17       ` Vivek Goyal
2009-07-02 20:17     ` Vivek Goyal
2009-07-02 20:17       ` Vivek Goyal
     [not found]   ` <1246564917-19603-14-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-02 20:09     ` Nauman Rafique
2009-07-02 20:01 ` [PATCH 14/25] io-controller: Separate out queue and data Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 15/25] io-conroller: Prepare elevator layer for single queue schedulers Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 16/25] io-controller: noop changes for hierarchical fair queuing Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 17/25] io-controller: deadline " Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 18/25] io-controller: anticipatory " Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 19/25] blkio_cgroup patches from Ryo to track async bios Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 20/25] io-controller: map async requests to appropriate cgroup Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
     [not found]   ` <1246564917-19603-21-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-08-03  2:13     ` Gui Jianfeng
2009-08-03  2:13   ` Gui Jianfeng
2009-08-03  2:13     ` Gui Jianfeng
     [not found]     ` <4A7647DA.5050607-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-08-04  1:25       ` Vivek Goyal
2009-08-04  1:25     ` Vivek Goyal
2009-08-04  1:25       ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 21/25] io-controller: Per cgroup request descriptor support Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-08  3:27   ` Gui Jianfeng
2009-07-08  3:27     ` Gui Jianfeng
     [not found]     ` <4A54121D.5090008-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-08 13:57       ` Vivek Goyal
2009-07-08 13:57     ` Vivek Goyal
2009-07-08 13:57       ` Vivek Goyal
     [not found]   ` <1246564917-19603-22-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-08  3:27     ` Gui Jianfeng
2009-07-21  5:37     ` Gui Jianfeng
2009-07-21  5:37   ` Gui Jianfeng
     [not found]     ` <4A655434.5060404-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-21  5:55       ` Nauman Rafique
2009-07-21  5:55     ` Nauman Rafique
2009-07-21  5:55       ` Nauman Rafique
2009-07-21 14:01       ` Vivek Goyal
2009-07-21 14:01         ` Vivek Goyal
     [not found]         ` <20090721140134.GB540-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-21 17:57           ` Nauman Rafique
2009-07-21 17:57         ` Nauman Rafique
2009-07-21 17:57           ` Nauman Rafique
     [not found]       ` <e98e18940907202255y5c7c546ei95d87e5a451ad0c2-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2009-07-21 14:01         ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 22/25] io-controller: Per io group bdi congestion interface Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-17  0:16   ` Munehiro Ikeda
2009-07-17  0:16     ` Munehiro Ikeda
2009-07-17 13:52     ` Vivek Goyal
2009-07-17 13:52       ` Vivek Goyal
     [not found]     ` <4A5FC2CA.1040609-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-07-17 13:52       ` Vivek Goyal
     [not found]   ` <1246564917-19603-23-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-17  0:16     ` Munehiro Ikeda
2009-07-02 20:01 ` [PATCH 23/25] io-controller: Support per cgroup per device weights and io class Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 24/25] io-controller: Debug hierarchical IO scheduling Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-02 20:01 ` [PATCH 25/25] io-controller: experimental debug patch for async queue wait before expiry Vivek Goyal
2009-07-02 20:01   ` Vivek Goyal
2009-07-08  3:56 ` [RFC] IO scheduler based IO controller V6 Balbir Singh
2009-07-08  3:56   ` Balbir Singh
     [not found]   ` <20090708035621.GB3215-SINUvgVNF2CyUtPGxGje5AC/G2K4zDHf@public.gmane.org>
2009-07-08 13:41     ` Vivek Goyal
2009-07-08 13:41   ` Vivek Goyal
2009-07-08 13:41     ` Vivek Goyal
     [not found]     ` <20090708134114.GA24048-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-08 14:39       ` Balbir Singh
2009-07-08 14:39     ` Balbir Singh
2009-07-08 14:39       ` Balbir Singh
     [not found]       ` <20090708143925.GE3215-SINUvgVNF2CyUtPGxGje5AC/G2K4zDHf@public.gmane.org>
2009-07-09  1:58         ` Vivek Goyal
2009-07-09  1:58       ` Vivek Goyal
2009-07-09  1:58         ` Vivek Goyal
2009-07-10  1:56 ` [PATCH] io-controller: implement per group request allocation limitation Gui Jianfeng
2009-07-10  1:56   ` Gui Jianfeng
2009-07-13 16:03   ` Vivek Goyal
2009-07-13 16:03     ` Vivek Goyal
2009-07-13 21:08     ` Munehiro Ikeda
2009-07-13 21:08       ` Munehiro Ikeda
2009-07-14  7:45       ` Gui Jianfeng
2009-07-14  7:45         ` Gui Jianfeng
2009-08-04  2:00         ` Munehiro Ikeda
2009-08-04  2:00           ` Munehiro Ikeda
     [not found]           ` <4A77964A.7040602-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-08-04  6:38             ` Gui Jianfeng
2009-08-04 22:37             ` Vivek Goyal
2009-08-04  6:38           ` Gui Jianfeng
2009-08-04  6:38             ` Gui Jianfeng
2009-08-04 22:37           ` Vivek Goyal
2009-08-04 22:37             ` Vivek Goyal
     [not found]         ` <4A5C377F.4040105-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-08-04  2:00           ` Munehiro Ikeda
     [not found]       ` <4A5BA238.3030902-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-07-14  7:45         ` Gui Jianfeng
     [not found]     ` <20090713160352.GA3714-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-13 21:08       ` Munehiro Ikeda
2009-07-14  7:37       ` Gui Jianfeng
2009-07-14  7:37         ` Gui Jianfeng
2009-08-04  2:02   ` Munehiro Ikeda
2009-08-04  2:02     ` Munehiro Ikeda
2009-08-04  6:41     ` Gui Jianfeng
2009-08-04  6:41       ` Gui Jianfeng
     [not found]     ` <4A7796D2.4030104-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-08-04  6:41       ` Gui Jianfeng
2009-08-04  2:04   ` Munehiro Ikeda
2009-08-04  2:04     ` Munehiro Ikeda
     [not found]     ` <4A779719.1070900-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-08-04  6:45       ` Gui Jianfeng
2009-08-04  6:45     ` Gui Jianfeng
2009-08-04  6:45       ` Gui Jianfeng
     [not found]   ` <4A569FC5.7090801-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-13 16:03     ` Vivek Goyal
2009-08-04  2:02     ` Munehiro Ikeda
2009-08-04  2:04     ` Munehiro Ikeda
2009-07-27  2:10 ` [RFC] IO scheduler based IO controller V6 Gui Jianfeng
     [not found]   ` <4A6D0C9A.3080600-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-07-27 12:55     ` Vivek Goyal
2009-07-27 12:55   ` Vivek Goyal
2009-07-27 12:55     ` Vivek Goyal
2009-07-28  3:27     ` Vivek Goyal
2009-07-28  3:27       ` Vivek Goyal
     [not found]       ` <20090728032712.GC3620-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-28  3:36         ` Gui Jianfeng
2009-07-28  3:36       ` Gui Jianfeng
2009-07-28  3:36         ` Gui Jianfeng
     [not found]     ` <20090727125503.GA24449-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-07-28  3:27       ` Vivek Goyal
2009-07-28 11:36       ` Gui Jianfeng
2009-07-29  9:07       ` Gui Jianfeng
2009-07-28 11:36     ` Gui Jianfeng
2009-07-29  9:07     ` Gui Jianfeng

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='1246564917-19603-13-git-send-email-vgoyal__18765.2340079241$1246567800$gmane$org@redhat.com' \
    --to=vgoyal-h+wxahxf7alqt0dzr+alfa@public.gmane.org \
    --cc=agk-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org \
    --cc=containers-cunTk1MwBs9QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org \
    --cc=dm-devel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=dpshah-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
    --cc=jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org \
    --cc=linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=lizf-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org \
    --cc=nauman-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
    --cc=snitzer-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.