All of lore.kernel.org
 help / color / mirror / Atom feed
From: Vivek Goyal <vgoyal@redhat.com>
To: nauman@google.com, dpshah@google.com, lizf@cn.fujitsu.com,
	mikew@google.com, fchecconi@gmail.com, paolo.valente@unimore.it,
	jens.axboe@oracle.com, ryov@valinux.co.jp,
	fernando@oss.ntt.co.jp, s-uchida@ap.jp.nec.com,
	taka@valinux.co.jp, guijianfeng@cn.fujitsu.com,
	jmoyer@redhat.com, dhaval@linux.vnet.ibm.com,
	balbir@linux.vnet.ibm.com, linux-kernel@vger.kernel.org,
	containers@lists.linux-foundation.org, righi.andrea@gmail.com,
	agk@redhat.com, dm-devel@redhat.com, snitzer@redhat.com,
	m-ikeda@ds.jp.nec.com
Cc: vgoyal@redhat.com, akpm@linux-foundation.org
Subject: [PATCH 17/18] io-controller: IO group refcounting support
Date: Tue,  5 May 2009 15:58:44 -0400	[thread overview]
Message-ID: <1241553525-28095-18-git-send-email-vgoyal@redhat.com> (raw)
In-Reply-To: <1241553525-28095-1-git-send-email-vgoyal@redhat.com>

o In the original BFQ patch once a cgroup is being deleted, it will clean
  up the associated io groups immediately and if there are any active io
  queues with that group, these will be moved to root group. This movement
  of queues is not good from fairness perspective as one can then create
  a cgroup, dump lots of IO and then delete the cgroup and then potentially
  get higher share. Apart from that there are more issues, hence it was felt
  that we also need an io group refcounting mechanism so that io groups can
  be reclaimed asynchronously.

o This is a crude patch to implement io group refcounting. This is still
  work in progress and Nauman and Divyesh are playing with more ideas.

o I can do basic cgroup creation, deletion, task movement operations and
  there are no crashes (As was reported with V1 by Gui). Though I have not
  verified that io groups are actually being freed. Will do it next.

o There are couple of hard to hit race conditions I am aware of. Will fix
  that in upcoming versions. (RCU lookup when group might be going away
  during cgroup deletion).

Signed-off-by: Nauman Rafique <nauman@google.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 block/cfq-iosched.c |   16 ++-
 block/elevator-fq.c |  441 ++++++++++++++++++++++++++++++++++-----------------
 block/elevator-fq.h |   26 ++--
 3 files changed, 320 insertions(+), 163 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ea71239..cf9d258 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1308,8 +1308,17 @@ static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
 
 	if (sync_cfqq != NULL) {
 		__iog = cfqq_to_io_group(sync_cfqq);
-		if (iog != __iog)
-			io_ioq_move(q->elevator, sync_cfqq->ioq, iog);
+		/*
+		 * Drop reference to sync queue. A new sync queue will
+		 * be assigned in new group upon arrival of a fresh request.
+		 * If old queue has got requests, those requests will be
+		 * dispatched over a period of time and queue will be freed
+		 * automatically.
+		 */
+		if (iog != __iog) {
+			cic_set_cfqq(cic, NULL, 1);
+			cfq_put_queue(sync_cfqq);
+		}
 	}
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1422,6 +1431,9 @@ alloc_ioq:
 			elv_mark_ioq_sync(cfqq->ioq);
 		}
 		cfqq->pid = current->pid;
+
+		/* ioq reference on iog */
+		elv_get_iog(iog);
 		cfq_log_cfqq(cfqd, cfqq, "alloced");
 	}
 
diff --git a/block/elevator-fq.c b/block/elevator-fq.c
index bd98317..1dd0bb3 100644
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -36,7 +36,7 @@ static inline struct io_queue *elv_close_cooperator(struct request_queue *q,
 					struct io_queue *ioq, int probe);
 struct io_entity *bfq_lookup_next_entity(struct io_sched_data *sd,
 						 int extract);
-void elv_release_ioq(struct elevator_queue *eq, struct io_queue **ioq_ptr);
+void elv_release_ioq(struct io_queue **ioq_ptr);
 int elv_iosched_expire_ioq(struct request_queue *q, int slice_expired,
 					int force);
 
@@ -108,6 +108,16 @@ static inline void bfq_check_next_active(struct io_sched_data *sd,
 {
 	BUG_ON(sd->next_active != entity);
 }
+
+static inline struct io_group *io_entity_to_iog(struct io_entity *entity)
+{
+	struct io_group *iog = NULL;
+
+	BUG_ON(entity == NULL);
+	if (entity->my_sched_data != NULL)
+		iog = container_of(entity, struct io_group, entity);
+	return iog;
+}
 #else /* GROUP_IOSCHED */
 #define for_each_entity(entity)	\
 	for (; entity != NULL; entity = NULL)
@@ -124,6 +134,11 @@ static inline void bfq_check_next_active(struct io_sched_data *sd,
 					 struct io_entity *entity)
 {
 }
+
+static inline struct io_group *io_entity_to_iog(struct io_entity *entity)
+{
+	return NULL;
+}
 #endif
 
 /*
@@ -224,7 +239,6 @@ static void bfq_idle_extract(struct io_service_tree *st,
 				struct io_entity *entity)
 {
 	struct rb_node *next;
-	struct io_queue *ioq = io_entity_to_ioq(entity);
 
 	BUG_ON(entity->tree != &st->idle);
 
@@ -239,10 +253,6 @@ static void bfq_idle_extract(struct io_service_tree *st,
 	}
 
 	bfq_extract(&st->idle, entity);
-
-	/* Delete queue from idle list */
-	if (ioq)
-		list_del(&ioq->queue_list);
 }
 
 /**
@@ -374,9 +384,12 @@ static void bfq_active_insert(struct io_service_tree *st,
 void bfq_get_entity(struct io_entity *entity)
 {
 	struct io_queue *ioq = io_entity_to_ioq(entity);
+	struct io_group *iog = io_entity_to_iog(entity);
 
 	if (ioq)
 		elv_get_ioq(ioq);
+	else
+		elv_get_iog(iog);
 }
 
 /**
@@ -436,7 +449,6 @@ static void bfq_idle_insert(struct io_service_tree *st,
 {
 	struct io_entity *first_idle = st->first_idle;
 	struct io_entity *last_idle = st->last_idle;
-	struct io_queue *ioq = io_entity_to_ioq(entity);
 
 	if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
 		st->first_idle = entity;
@@ -444,10 +456,6 @@ static void bfq_idle_insert(struct io_service_tree *st,
 		st->last_idle = entity;
 
 	bfq_insert(&st->idle, entity);
-
-	/* Add this queue to idle list */
-	if (ioq)
-		list_add(&ioq->queue_list, &ioq->efqd->idle_list);
 }
 
 /**
@@ -463,14 +471,21 @@ static void bfq_forget_entity(struct io_service_tree *st,
 				struct io_entity *entity)
 {
 	struct io_queue *ioq = NULL;
+	struct io_group *iog = NULL;
 
 	BUG_ON(!entity->on_st);
 	entity->on_st = 0;
 	st->wsum -= entity->weight;
+
 	ioq = io_entity_to_ioq(entity);
-	if (!ioq)
+	if (ioq) {
+		elv_put_ioq(ioq);
 		return;
-	elv_put_ioq(ioq);
+	}
+
+	iog = io_entity_to_iog(entity);
+	if (iog)
+		elv_put_iog(iog);
 }
 
 /**
@@ -909,21 +924,21 @@ void entity_served(struct io_entity *entity, bfq_service_t served,
 /*
  * Release all the io group references to its async queues.
  */
-void io_put_io_group_queues(struct elevator_queue *e, struct io_group *iog)
+void io_put_io_group_queues(struct io_group *iog)
 {
 	int i, j;
 
 	for (i = 0; i < 2; i++)
 		for (j = 0; j < IOPRIO_BE_NR; j++)
-			elv_release_ioq(e, &iog->async_queue[i][j]);
+			elv_release_ioq(&iog->async_queue[i][j]);
 
 	/* Free up async idle queue */
-	elv_release_ioq(e, &iog->async_idle_queue);
+	elv_release_ioq(&iog->async_idle_queue);
 
 #ifdef CONFIG_GROUP_IOSCHED
 	/* Optimization for io schedulers having single ioq */
-	if (elv_iosched_single_ioq(e))
-		elv_release_ioq(e, &iog->ioq);
+	if (iog->ioq)
+		elv_release_ioq(&iog->ioq);
 #endif
 }
 
@@ -1018,6 +1033,9 @@ void io_group_set_parent(struct io_group *iog, struct io_group *parent)
 	entity = &iog->entity;
 	entity->parent = parent->my_entity;
 	entity->sched_data = &parent->sched_data;
+	if (entity->parent)
+		/* Child group reference on parent group */
+		elv_get_iog(parent);
 }
 
 /**
@@ -1210,6 +1228,9 @@ struct io_group *io_group_chain_alloc(struct request_queue *q, void *key,
 		if (!iog)
 			goto cleanup;
 
+		atomic_set(&iog->ref, 0);
+		iog->deleting = 0;
+
 		io_group_init_entity(iocg, iog);
 		iog->my_entity = &iog->entity;
 
@@ -1279,7 +1300,12 @@ void io_group_chain_link(struct request_queue *q, void *key,
 
 		rcu_assign_pointer(leaf->key, key);
 		hlist_add_head_rcu(&leaf->group_node, &iocg->group_data);
+		/* io_cgroup reference on io group */
+		elv_get_iog(leaf);
+
 		hlist_add_head(&leaf->elv_data_node, &efqd->group_list);
+		/* elevator reference on io group */
+		elv_get_iog(leaf);
 
 		spin_unlock_irqrestore(&iocg->lock, flags);
 
@@ -1388,12 +1414,23 @@ struct io_cgroup *get_iocg_from_bio(struct bio *bio)
 	if (!iocg)
 		return &io_root_cgroup;
 
+	/*
+	 * If this cgroup io_cgroup is being deleted, map the bio to
+	 * root cgroup
+	 */
+	if (css_is_removed(&iocg->css))
+		return &io_root_cgroup;
+
 	return iocg;
 }
 
 /*
  * Find the io group bio belongs to.
  * If "create" is set, io group is created if it is not already present.
+ *
+ * Note: There is a narrow window of race where a group is being freed
+ * by cgroup deletion path and some rq has slipped through in this group.
+ * Fix it.
  */
 struct io_group *io_get_io_group_bio(struct request_queue *q, struct bio *bio,
 					int create)
@@ -1440,8 +1477,8 @@ void io_free_root_group(struct elevator_queue *e)
 	spin_lock_irq(&iocg->lock);
 	hlist_del_rcu(&iog->group_node);
 	spin_unlock_irq(&iocg->lock);
-	io_put_io_group_queues(e, iog);
-	kfree(iog);
+	io_put_io_group_queues(iog);
+	elv_put_iog(iog);
 }
 
 struct io_group *io_alloc_root_group(struct request_queue *q,
@@ -1459,11 +1496,15 @@ struct io_group *io_alloc_root_group(struct request_queue *q,
 	for (i = 0; i < IO_IOPRIO_CLASSES; i++)
 		iog->sched_data.service_tree[i] = IO_SERVICE_TREE_INIT;
 
+	atomic_set(&iog->ref, 0);
+
 	blk_init_request_list(&iog->rl);
 
 	iocg = &io_root_cgroup;
 	spin_lock_irq(&iocg->lock);
 	rcu_assign_pointer(iog->key, key);
+	/* elevator reference. */
+	elv_get_iog(iog);
 	hlist_add_head_rcu(&iog->group_node, &iocg->group_data);
 	spin_unlock_irq(&iocg->lock);
 
@@ -1560,105 +1601,109 @@ void iocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
 }
 
 /*
- * Move the queue to the root group if it is active. This is needed when
- * a cgroup is being deleted and all the IO is not done yet. This is not
- * very good scheme as a user might get unfair share. This needs to be
- * fixed.
+ * check whether a given group has got any active entities on any of the
+ * service trees.
  */
-void io_ioq_move(struct elevator_queue *e, struct io_queue *ioq,
-				struct io_group *iog)
+static inline int io_group_has_active_entities(struct io_group *iog)
 {
-	int busy, resume;
-	struct io_entity *entity = &ioq->entity;
-	struct elv_fq_data *efqd = &e->efqd;
-	struct io_service_tree *st = io_entity_service_tree(entity);
+	int i;
+	struct io_service_tree *st;
 
-	busy = elv_ioq_busy(ioq);
-	resume = !!ioq->nr_queued;
+	for (i = 0; i < IO_IOPRIO_CLASSES; i++) {
+		st = iog->sched_data.service_tree + i;
+		if (!RB_EMPTY_ROOT(&st->active))
+			return 1;
+	}
 
-	BUG_ON(resume && !entity->on_st);
-	BUG_ON(busy && !resume && entity->on_st && ioq != efqd->active_queue);
+	return 0;
+}
+
+/*
+ * Should be called with both iocg->lock as well as queue lock held (if
+ * group is still connected on elevator list)
+ */
+void __iocg_destroy(struct io_cgroup *iocg, struct io_group *iog,
+				int queue_lock_held)
+{
+	int i;
+	struct io_service_tree *st;
 
 	/*
-	 * We could be moving an queue which is on idle tree of previous group
-	 * What to do? I guess anyway this queue does not have any requests.
-	 * just forget the entity and free up from idle tree.
-	 *
-	 * This needs cleanup. Hackish.
+	 * If we are here then we got the queue lock if group was still on
+	 * elevator list. If group had already been disconnected from elevator
+	 * list, then we don't need the queue lock.
 	 */
-	if (entity->tree == &st->idle) {
-		BUG_ON(atomic_read(&ioq->ref) < 2);
-		bfq_put_idle_entity(st, entity);
-	}
 
-	if (busy) {
-		BUG_ON(atomic_read(&ioq->ref) < 2);
-
-		if (!resume)
-			elv_del_ioq_busy(e, ioq, 0);
-		else
-			elv_deactivate_ioq(efqd, ioq, 0);
-	}
+	/* Remove io group from cgroup list */
+	hlist_del(&iog->group_node);
 
 	/*
-	 * Here we use a reference to bfqg.  We don't need a refcounter
-	 * as the cgroup reference will not be dropped, so that its
-	 * destroy() callback will not be invoked.
+	 * Mark io group for deletion so that no new entry goes in
+	 * idle tree. Any active queue will be removed from active
+	 * tree and not put in to idle tree.
 	 */
-	entity->parent = iog->my_entity;
-	entity->sched_data = &iog->sched_data;
+	iog->deleting = 1;
 
-	if (busy && resume)
-		elv_activate_ioq(ioq, 0);
-}
-EXPORT_SYMBOL(io_ioq_move);
+	/* Flush idle tree.  */
+	for (i = 0; i < IO_IOPRIO_CLASSES; i++) {
+		st = iog->sched_data.service_tree + i;
+		io_flush_idle_tree(st);
+	}
 
-static void __io_destroy_group(struct elv_fq_data *efqd, struct io_group *iog)
-{
-	struct elevator_queue *eq;
-	struct io_entity *entity = iog->my_entity;
-	struct io_service_tree *st;
-	int i;
+	/*
+	 * Drop io group reference on all async queues. This group is
+	 * going away so once these queues are empty, free those up
+	 * instead of keeping these around in the hope that new IO
+	 * will come.
+	 *
+	 * Note: If this group is disconnected from elevator, elevator
+	 * switch must have already done it.
+	 */
 
-	eq = container_of(efqd, struct elevator_queue, efqd);
-	hlist_del(&iog->elv_data_node);
-	__bfq_deactivate_entity(entity, 0);
-	io_put_io_group_queues(eq, iog);
+	io_put_io_group_queues(iog);
 
-	for (i = 0; i < IO_IOPRIO_CLASSES; i++) {
-		st = iog->sched_data.service_tree + i;
+	if (!io_group_has_active_entities(iog)) {
+		/*
+		 * io group does not have any active entities. Because this
+		 * group has been decoupled from io_cgroup list and this
+		 * cgroup is being deleted, this group should not receive
+		 * any new IO. Hence it should be safe to deactivate this
+		 * io group and remove from the scheduling tree.
+		 */
+		__bfq_deactivate_entity(iog->my_entity, 0);
 
 		/*
-		 * The idle tree may still contain bfq_queues belonging
-		 * to exited task because they never migrated to a different
-		 * cgroup from the one being destroyed now.  Noone else
-		 * can access them so it's safe to act without any lock.
+		 * Because this io group does not have any active entities,
+		 * it should be safe to remove it from elevator list and
+		 * drop elevator reference so that upon dropping io_cgroup
+		 * reference, this io group should be freed and we don't
+		 * wait for elevator switch to happen to free the group
+		 * up.
 		 */
-		io_flush_idle_tree(st);
+		if (queue_lock_held) {
+			hlist_del(&iog->elv_data_node);
+			rcu_assign_pointer(iog->key, NULL);
+			/*
+			 * Drop iog reference taken by elevator
+			 * (efqd->group_list)
+			 */
+			elv_put_iog(iog);
+		}
 
-		BUG_ON(!RB_EMPTY_ROOT(&st->active));
-		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
 	}
 
-	BUG_ON(iog->sched_data.next_active != NULL);
-	BUG_ON(iog->sched_data.active_entity != NULL);
-	BUG_ON(entity->tree != NULL);
+	/* Drop iocg reference on io group */
+	elv_put_iog(iog);
 }
 
-/**
- * bfq_destroy_group - destroy @bfqg.
- * @bgrp: the bfqio_cgroup containing @bfqg.
- * @bfqg: the group being destroyed.
- *
- * Destroy @bfqg, making sure that it is not referenced from its parent.
- */
-static void io_destroy_group(struct io_cgroup *iocg, struct io_group *iog)
+void iocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 {
-	struct elv_fq_data *efqd = NULL;
-	unsigned long uninitialized_var(flags);
-
-	/* Remove io group from cgroup list */
-	hlist_del(&iog->group_node);
+	struct io_cgroup *iocg = cgroup_to_io_cgroup(cgroup);
+	struct hlist_node *n, *tmp;
+	struct io_group *iog;
+	unsigned long flags;
+	int queue_lock_held = 0;
+	struct elv_fq_data *efqd;
 
 	/*
 	 * io groups are linked in two lists. One list is maintained
@@ -1677,58 +1722,93 @@ static void io_destroy_group(struct io_cgroup *iocg, struct io_group *iog)
 	 * try to free up async queues again or flush the idle tree.
 	 */
 
-	rcu_read_lock();
-	efqd = rcu_dereference(iog->key);
-	if (efqd != NULL) {
-		spin_lock_irqsave(efqd->queue->queue_lock, flags);
-		if (iog->key == efqd)
-			__io_destroy_group(efqd, iog);
-		spin_unlock_irqrestore(efqd->queue->queue_lock, flags);
-	}
-	rcu_read_unlock();
-
-	/*
-	 * No need to defer the kfree() to the end of the RCU grace
-	 * period: we are called from the destroy() callback of our
-	 * cgroup, so we can be sure that noone is a) still using
-	 * this cgroup or b) doing lookups in it.
-	 */
-	kfree(iog);
-}
+retry:
+	spin_lock_irqsave(&iocg->lock, flags);
+	hlist_for_each_entry_safe(iog, n, tmp, &iocg->group_data, group_node) {
+		/* Take the group queue lock */
+		rcu_read_lock();
+		efqd = rcu_dereference(iog->key);
+		if (efqd != NULL) {
+			if (spin_trylock_irq(efqd->queue->queue_lock)) {
+				if (iog->key == efqd) {
+					queue_lock_held = 1;
+					rcu_read_unlock();
+					goto locked;
+				}
 
-void iocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
-	struct io_cgroup *iocg = cgroup_to_io_cgroup(cgroup);
-	struct hlist_node *n, *tmp;
-	struct io_group *iog;
+				/*
+				 * After acquiring the queue lock, we found
+				 * iog->key==NULL, that means elevator switch
+				 * completed, group is no longer connected on
+				 * elevator hence we can proceed safely without
+				 * queue lock.
+				 */
+				spin_unlock_irq(efqd->queue->queue_lock);
+			} else {
+				/*
+				 * Did not get the queue lock while trying.
+				 * Backout. Drop iocg->lock and try again
+				 */
+				rcu_read_unlock();
+				spin_unlock_irqrestore(&iocg->lock, flags);
+				udelay(100);
+				goto retry;
 
-	/*
-	 * Since we are destroying the cgroup, there are no more tasks
-	 * referencing it, and all the RCU grace periods that may have
-	 * referenced it are ended (as the destruction of the parent
-	 * cgroup is RCU-safe); bgrp->group_data will not be accessed by
-	 * anything else and we don't need any synchronization.
-	 */
-	hlist_for_each_entry_safe(iog, n, tmp, &iocg->group_data, group_node)
-		io_destroy_group(iocg, iog);
+			}
+		}
+		/*
+		 * We come here when iog->key==NULL, that means elevator switch
+		 * has already taken place and now this group is no more
+		 * connected on elevator and one does not have to have a
+		 * queue lock to do the cleanup.
+		 */
+		rcu_read_unlock();
+locked:
+		__iocg_destroy(iocg, iog, queue_lock_held);
+		if (queue_lock_held) {
+			spin_unlock_irq(efqd->queue->queue_lock);
+			queue_lock_held = 0;
+		}
+	}
+	spin_unlock_irqrestore(&iocg->lock, flags);
 
 	BUG_ON(!hlist_empty(&iocg->group_data));
 
 	kfree(iocg);
 }
 
+/* Should be called with queue lock held */
 void io_disconnect_groups(struct elevator_queue *e)
 {
 	struct hlist_node *pos, *n;
 	struct io_group *iog;
 	struct elv_fq_data *efqd = &e->efqd;
+	int i;
+	struct io_service_tree *st;
 
 	hlist_for_each_entry_safe(iog, pos, n, &efqd->group_list,
 					elv_data_node) {
-		hlist_del(&iog->elv_data_node);
-
+		/*
+		 * At this point of time group should be on idle tree. This
+		 * would extract the group from idle tree.
+		 */
 		__bfq_deactivate_entity(iog->my_entity, 0);
 
+		/* Flush all the idle trees of the group */
+		for (i = 0; i < IO_IOPRIO_CLASSES; i++) {
+			st = iog->sched_data.service_tree + i;
+			io_flush_idle_tree(st);
+		}
+
+		/*
+		 * This has to be here also apart from cgroup cleanup path
+		 * and the reason being that if async queue reference of the
+		 * group are not dropped, then async ioq as well as associated
+		 * queue will not be reclaimed. Apart from that async cfqq
+		 * has to be cleaned up before elevator goes away.
+		 */
+		io_put_io_group_queues(iog);
+
 		/*
 		 * Don't remove from the group hash, just set an
 		 * invalid key.  No lookups can race with the
@@ -1736,11 +1816,68 @@ void io_disconnect_groups(struct elevator_queue *e)
 		 * implies also that new elements cannot be added
 		 * to the list.
 		 */
+		hlist_del(&iog->elv_data_node);
 		rcu_assign_pointer(iog->key, NULL);
-		io_put_io_group_queues(e, iog);
+		/* Drop iog reference taken by elevator (efqd->group_list)*/
+		elv_put_iog(iog);
 	}
 }
 
+/*
+ * This cleanup function does the last bit of things to destroy the cgroup.
+ * It should only get called after io_destroy_group has been invoked.
+ */
+void io_group_cleanup(struct io_group *iog)
+{
+	struct io_service_tree *st;
+	struct io_entity *entity = iog->my_entity;
+	int i;
+
+	for (i = 0; i < IO_IOPRIO_CLASSES; i++) {
+		st = iog->sched_data.service_tree + i;
+
+		BUG_ON(!RB_EMPTY_ROOT(&st->active));
+		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
+		BUG_ON(st->wsum != 0);
+	}
+
+	BUG_ON(iog->sched_data.next_active != NULL);
+	BUG_ON(iog->sched_data.active_entity != NULL);
+	BUG_ON(entity != NULL && entity->tree != NULL);
+
+	kfree(iog);
+}
+
+/*
+ * Should be called with queue lock held. The only case it can be called
+ * without queue lock held is when elevator has gone away leaving behind
+ * dead io groups which are hanging there to be reclaimed when cgroup is
+ * deleted. In case of cgroup deletion, I think there is only one thread
+ * doing deletion and rest of the threads should have been taken care by
+ * cgroup stuff.
+ */
+void elv_put_iog(struct io_group *iog)
+{
+	struct io_group *parent = NULL;
+
+	BUG_ON(!iog);
+
+	BUG_ON(atomic_read(&iog->ref) <= 0);
+	if (!atomic_dec_and_test(&iog->ref))
+		return;
+
+	BUG_ON(iog->entity.on_st);
+
+	if (iog->my_entity)
+		parent = container_of(iog->my_entity->parent,
+				      struct io_group, entity);
+	io_group_cleanup(iog);
+
+	if (parent)
+		elv_put_iog(parent);
+}
+EXPORT_SYMBOL(elv_put_iog);
+
 struct cgroup_subsys io_subsys = {
 	.name = "io",
 	.create = iocg_create,
@@ -1887,6 +2024,8 @@ alloc_ioq:
 		elv_init_ioq(e, ioq, rq->iog, sched_q, IOPRIO_CLASS_BE, 4, 1);
 		io_group_set_ioq(iog, ioq);
 		elv_mark_ioq_sync(ioq);
+		/* ioq reference on iog */
+		elv_get_iog(iog);
 	}
 
 	if (new_sched_q)
@@ -1987,7 +2126,7 @@ EXPORT_SYMBOL(io_get_io_group_bio);
 void io_free_root_group(struct elevator_queue *e)
 {
 	struct io_group *iog = e->efqd.root_group;
-	io_put_io_group_queues(e, iog);
+	io_put_io_group_queues(iog);
 	kfree(iog);
 }
 
@@ -2437,13 +2576,11 @@ void elv_put_ioq(struct io_queue *ioq)
 }
 EXPORT_SYMBOL(elv_put_ioq);
 
-void elv_release_ioq(struct elevator_queue *e, struct io_queue **ioq_ptr)
+void elv_release_ioq(struct io_queue **ioq_ptr)
 {
-	struct io_group *root_group = e->efqd.root_group;
 	struct io_queue *ioq = *ioq_ptr;
 
 	if (ioq != NULL) {
-		io_ioq_move(e, ioq, root_group);
 		/* Drop the reference taken by the io group */
 		elv_put_ioq(ioq);
 		*ioq_ptr = NULL;
@@ -2600,9 +2737,19 @@ void elv_activate_ioq(struct io_queue *ioq, int add_front)
 void elv_deactivate_ioq(struct elv_fq_data *efqd, struct io_queue *ioq,
 					int requeue)
 {
+	struct io_group *iog = ioq_to_io_group(ioq);
+
 	if (ioq == efqd->active_queue)
 		elv_reset_active_ioq(efqd);
 
+	/*
+	 * The io group ioq belongs to is going away. Don't requeue the
+	 * ioq on idle tree. Free it.
+	 */
+#ifdef CONFIG_GROUP_IOSCHED
+	if (iog->deleting == 1)
+		requeue = 0;
+#endif
 	bfq_deactivate_entity(&ioq->entity, requeue);
 }
 
@@ -3002,15 +3149,6 @@ void elv_ioq_arm_slice_timer(struct request_queue *q, int wait_for_busy)
 	}
 }
 
-void elv_free_idle_ioq_list(struct elevator_queue *e)
-{
-	struct io_queue *ioq, *n;
-	struct elv_fq_data *efqd = &e->efqd;
-
-	list_for_each_entry_safe(ioq, n, &efqd->idle_list, queue_list)
-		elv_deactivate_ioq(efqd, ioq, 0);
-}
-
 /*
  * Call iosched to let that elevator wants to expire the queue. This gives
  * iosched like AS to say no (if it is in the middle of batch changeover or
@@ -3427,7 +3565,6 @@ int elv_init_fq_data(struct request_queue *q, struct elevator_queue *e)
 
 	INIT_WORK(&efqd->unplug_work, elv_kick_queue);
 
-	INIT_LIST_HEAD(&efqd->idle_list);
 	INIT_HLIST_HEAD(&efqd->group_list);
 
 	efqd->elv_slice[0] = elv_slice_async;
@@ -3458,9 +3595,19 @@ void elv_exit_fq_data(struct elevator_queue *e)
 	elv_shutdown_timer_wq(e);
 
 	spin_lock_irq(q->queue_lock);
-	/* This should drop all the idle tree references of ioq */
-	elv_free_idle_ioq_list(e);
-	/* This should drop all the io group references of async queues */
+	/*
+	 * This should drop all the references of async queues taken by
+	 * io group.
+	 *
+	 * Also should deactivate the group and extract from the
+	 * idle tree. (group can not be on active tree now after the
+	 * elevator has been drained).
+	 *
+	 * Should flush idle tree of the group which in turn will drop
+	 * ioq reference taken by active/idle tree.
+	 *
+	 * Drop the iog reference taken by elevator.
+	 */
 	io_disconnect_groups(e);
 	spin_unlock_irq(q->queue_lock);
 
diff --git a/block/elevator-fq.h b/block/elevator-fq.h
index 58543ec..42e3777 100644
--- a/block/elevator-fq.h
+++ b/block/elevator-fq.h
@@ -165,7 +165,6 @@ struct io_queue {
 
 	/* Pointer to generic elevator data structure */
 	struct elv_fq_data *efqd;
-	struct list_head queue_list;
 	pid_t pid;
 
 	/* Number of requests queued on this io queue */
@@ -219,6 +218,7 @@ struct io_queue {
  *    o All the other fields are protected by the @bfqd queue lock.
  */
 struct io_group {
+	atomic_t ref;
 	struct io_entity entity;
 	struct hlist_node elv_data_node;
 	struct hlist_node group_node;
@@ -242,6 +242,9 @@ struct io_group {
 
 	/* request list associated with the group */
 	struct request_list rl;
+
+	/* io group is going away */
+	int deleting;
 };
 
 /**
@@ -279,9 +282,6 @@ struct elv_fq_data {
 	/* List of io groups hanging on this elevator */
 	struct hlist_head group_list;
 
-	/* List of io queues on idle tree. */
-	struct list_head idle_list;
-
 	struct request_queue *queue;
 	unsigned int busy_queues;
 	/*
@@ -504,8 +504,6 @@ static inline struct io_group *ioq_to_io_group(struct io_queue *ioq)
 
 #ifdef CONFIG_GROUP_IOSCHED
 extern int io_group_allow_merge(struct request *rq, struct bio *bio);
-extern void io_ioq_move(struct elevator_queue *e, struct io_queue *ioq,
-					struct io_group *iog);
 extern void elv_fq_set_request_io_group(struct request_queue *q,
 					struct request *rq, struct bio *bio);
 static inline bfq_weight_t iog_weight(struct io_group *iog)
@@ -523,6 +521,8 @@ extern struct io_queue *elv_lookup_ioq_bio(struct request_queue *q,
 extern struct request_list *io_group_get_request_list(struct request_queue *q,
 						struct bio *bio);
 
+extern void elv_put_iog(struct io_group *iog);
+
 /* Returns single ioq associated with the io group. */
 static inline struct io_queue *io_group_ioq(struct io_group *iog)
 {
@@ -545,17 +545,12 @@ static inline struct io_group *rq_iog(struct request_queue *q,
 	return rq->iog;
 }
 
-#else /* !GROUP_IOSCHED */
-/*
- * No ioq movement is needed in case of flat setup. root io group gets cleaned
- * up upon elevator exit and before that it has been made sure that both
- * active and idle tree are empty.
- */
-static inline void io_ioq_move(struct elevator_queue *e, struct io_queue *ioq,
-					struct io_group *iog)
+static inline void elv_get_iog(struct io_group *iog)
 {
+	atomic_inc(&iog->ref);
 }
 
+#else /* !GROUP_IOSCHED */
 static inline int io_group_allow_merge(struct request *rq, struct bio *bio)
 {
 	return 1;
@@ -608,6 +603,9 @@ static inline struct io_queue *elv_lookup_ioq_bio(struct request_queue *q,
 	return NULL;
 }
 
+static inline void elv_get_iog(struct io_group *iog) { }
+
+static inline void elv_put_iog(struct io_group *iog) { }
 
 extern struct io_group *rq_iog(struct request_queue *q, struct request *rq);
 
-- 
1.6.0.1


  parent reply	other threads:[~2009-05-05 20:04 UTC|newest]

Thread overview: 295+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-05-05 19:58 IO scheduler based IO Controller V2 Vivek Goyal
2009-05-05 19:58 ` [PATCH 01/18] io-controller: Documentation Vivek Goyal
2009-05-06  3:16   ` Gui Jianfeng
     [not found]     ` <4A0100F4.4040400-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-06 13:31       ` Vivek Goyal
2009-05-06 13:31     ` Vivek Goyal
     [not found]   ` <1241553525-28095-2-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06  3:16     ` Gui Jianfeng
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 02/18] io-controller: Common flat fair queuing code in elevaotor layer Vivek Goyal
2009-05-05 19:58 ` [PATCH 03/18] io-controller: Charge for time slice based on average disk rate Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 04/18] io-controller: Modify cfq to make use of flat elevator fair queuing Vivek Goyal
     [not found]   ` <1241553525-28095-5-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-22  8:54     ` Gui Jianfeng
2009-05-22  8:54   ` Gui Jianfeng
     [not found]     ` <4A166829.6070608-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-22 12:33       ` Vivek Goyal
2009-05-22 12:33     ` Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 05/18] io-controller: Common hierarchical fair queuing code in elevaotor layer Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-07  7:42   ` Gui Jianfeng
2009-05-07  8:05     ` Li Zefan
     [not found]     ` <4A0290ED.7080506-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-07  8:05       ` Li Zefan
2009-05-08 12:45       ` Vivek Goyal
2009-05-08 12:45     ` Vivek Goyal
     [not found]   ` <1241553525-28095-6-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-07  7:42     ` Gui Jianfeng
2009-05-08 21:09     ` Andrea Righi
2009-05-08 21:09   ` Andrea Righi
2009-05-08 21:17     ` Vivek Goyal
2009-05-08 21:17     ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 06/18] io-controller: cfq changes to use " Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 07/18] io-controller: Export disk time used and nr sectors dipatched through cgroups Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-13  2:39   ` Gui Jianfeng
     [not found]     ` <4A0A32CB.4020609-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-13 14:51       ` Vivek Goyal
2009-05-13 14:51     ` Vivek Goyal
2009-05-14  7:53       ` Gui Jianfeng
     [not found]       ` <20090513145127.GB7696-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  7:53         ` Gui Jianfeng
     [not found]   ` <1241553525-28095-8-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-13  2:39     ` Gui Jianfeng
2009-05-05 19:58 ` [PATCH 08/18] io-controller: idle for sometime on sync queue before expiring it Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-13 15:00   ` Vivek Goyal
2009-05-13 15:00   ` Vivek Goyal
2009-06-09  7:56   ` Gui Jianfeng
2009-06-09 17:51     ` Vivek Goyal
2009-06-09 17:51       ` Vivek Goyal
     [not found]       ` <20090609175131.GB13476-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-06-10  1:30         ` Gui Jianfeng
2009-06-10  1:30       ` Gui Jianfeng
2009-06-10  1:30         ` Gui Jianfeng
2009-06-10 13:26         ` Vivek Goyal
2009-06-10 13:26           ` Vivek Goyal
2009-06-11  1:22           ` Gui Jianfeng
2009-06-11  1:22             ` Gui Jianfeng
     [not found]           ` <20090610132638.GB19680-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-06-11  1:22             ` Gui Jianfeng
     [not found]         ` <4A2F0CBE.8030208-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-06-10 13:26           ` Vivek Goyal
     [not found]     ` <4A2E15B6.8030001-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-06-09 17:51       ` Vivek Goyal
     [not found]   ` <1241553525-28095-9-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-13 15:00     ` Vivek Goyal
2009-06-09  7:56     ` Gui Jianfeng
2009-05-05 19:58 ` [PATCH 09/18] io-controller: Separate out queue and data Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 10/18] io-conroller: Prepare elevator layer for single queue schedulers Vivek Goyal
2009-05-05 19:58 ` [PATCH 11/18] io-controller: noop changes for hierarchical fair queuing Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 12/18] io-controller: deadline " Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 13/18] io-controller: anticipatory " Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 14/18] blkio_cgroup patches from Ryo to track async bios Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 15/18] io-controller: map async requests to appropriate cgroup Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 16/18] io-controller: Per cgroup request descriptor support Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 17/18] io-controller: IO group refcounting support Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal [this message]
     [not found]   ` <1241553525-28095-18-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-08  2:59     ` Gui Jianfeng
2009-05-08  2:59       ` Gui Jianfeng
2009-05-08 12:44       ` Vivek Goyal
     [not found]       ` <4A03A013.9000405-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-08 12:44         ` Vivek Goyal
2009-05-05 19:58 ` [PATCH 18/18] io-controller: Debug hierarchical IO scheduling Vivek Goyal
2009-05-05 19:58 ` Vivek Goyal
2009-05-06 21:40   ` IKEDA, Munehiro
     [not found]     ` <4A0203DB.1090809-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-05-06 21:58       ` Vivek Goyal
2009-05-06 21:58         ` Vivek Goyal
     [not found]         ` <20090506215833.GK8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 22:19           ` IKEDA, Munehiro
2009-05-06 22:19             ` IKEDA, Munehiro
     [not found]             ` <4A020CD5.2000308-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-05-06 22:24               ` Vivek Goyal
2009-05-06 22:24                 ` Vivek Goyal
     [not found]                 ` <20090506222458.GM8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 23:01                   ` IKEDA, Munehiro
2009-05-06 23:01                     ` IKEDA, Munehiro
     [not found]   ` <1241553525-28095-19-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 21:40     ` IKEDA, Munehiro
     [not found] ` <1241553525-28095-1-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-05 19:58   ` [PATCH 01/18] io-controller: Documentation Vivek Goyal
2009-05-05 19:58   ` [PATCH 02/18] io-controller: Common flat fair queuing code in elevaotor layer Vivek Goyal
2009-05-05 19:58     ` Vivek Goyal
2009-05-22  6:43     ` Gui Jianfeng
2009-05-22 12:32       ` Vivek Goyal
2009-05-23 20:04         ` Jens Axboe
     [not found]         ` <20090522123231.GA14972-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-23 20:04           ` Jens Axboe
     [not found]       ` <4A164978.1020604-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-22 12:32         ` Vivek Goyal
     [not found]     ` <1241553525-28095-3-git-send-email-vgoyal-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-22  6:43       ` Gui Jianfeng
2009-05-05 19:58   ` [PATCH 03/18] io-controller: Charge for time slice based on average disk rate Vivek Goyal
2009-05-05 19:58   ` [PATCH 04/18] io-controller: Modify cfq to make use of flat elevator fair queuing Vivek Goyal
2009-05-05 19:58   ` [PATCH 05/18] io-controller: Common hierarchical fair queuing code in elevaotor layer Vivek Goyal
2009-05-05 19:58   ` [PATCH 06/18] io-controller: cfq changes to use " Vivek Goyal
2009-05-05 19:58   ` [PATCH 07/18] io-controller: Export disk time used and nr sectors dipatched through cgroups Vivek Goyal
2009-05-05 19:58   ` [PATCH 08/18] io-controller: idle for sometime on sync queue before expiring it Vivek Goyal
2009-05-05 19:58   ` [PATCH 09/18] io-controller: Separate out queue and data Vivek Goyal
2009-05-05 19:58   ` [PATCH 10/18] io-conroller: Prepare elevator layer for single queue schedulers Vivek Goyal
2009-05-05 19:58     ` Vivek Goyal
2009-05-05 19:58   ` [PATCH 11/18] io-controller: noop changes for hierarchical fair queuing Vivek Goyal
2009-05-05 19:58   ` [PATCH 12/18] io-controller: deadline " Vivek Goyal
2009-05-05 19:58   ` [PATCH 13/18] io-controller: anticipatory " Vivek Goyal
2009-05-05 19:58   ` [PATCH 14/18] blkio_cgroup patches from Ryo to track async bios Vivek Goyal
2009-05-05 19:58   ` [PATCH 15/18] io-controller: map async requests to appropriate cgroup Vivek Goyal
2009-05-05 19:58   ` [PATCH 16/18] io-controller: Per cgroup request descriptor support Vivek Goyal
2009-05-05 19:58   ` [PATCH 17/18] io-controller: IO group refcounting support Vivek Goyal
2009-05-05 19:58   ` [PATCH 18/18] io-controller: Debug hierarchical IO scheduling Vivek Goyal
2009-05-05 20:24   ` IO scheduler based IO Controller V2 Andrew Morton
2009-05-05 20:24     ` Andrew Morton
2009-05-05 22:20     ` Peter Zijlstra
2009-05-06  3:42       ` Balbir Singh
2009-05-06  3:42       ` Balbir Singh
2009-05-06 10:20         ` Fabio Checconi
2009-05-06 17:10           ` Balbir Singh
2009-05-06 17:10             ` Balbir Singh
     [not found]           ` <20090506102030.GB20544-f9ZlEuEWxVeACYmtYXMKmw@public.gmane.org>
2009-05-06 17:10             ` Balbir Singh
2009-05-06 18:47         ` Divyesh Shah
     [not found]         ` <20090506034254.GD4416-SINUvgVNF2CyUtPGxGje5AC/G2K4zDHf@public.gmane.org>
2009-05-06 10:20           ` Fabio Checconi
2009-05-06 18:47           ` Divyesh Shah
2009-05-06 20:42           ` Andrea Righi
2009-05-06 20:42         ` Andrea Righi
2009-05-06  2:33     ` Vivek Goyal
2009-05-06 17:59       ` Nauman Rafique
2009-05-06 20:07       ` Andrea Righi
2009-05-06 21:21         ` Vivek Goyal
2009-05-06 21:21         ` Vivek Goyal
     [not found]           ` <20090506212121.GI8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 22:02             ` Andrea Righi
2009-05-06 22:02               ` Andrea Righi
2009-05-06 22:17               ` Vivek Goyal
2009-05-06 22:17                 ` Vivek Goyal
     [not found]       ` <20090506023332.GA1212-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 17:59         ` Nauman Rafique
2009-05-06 20:07         ` Andrea Righi
2009-05-06 20:32         ` Vivek Goyal
2009-05-07  0:18         ` Ryo Tsuruta
2009-05-06 20:32       ` Vivek Goyal
     [not found]         ` <20090506203228.GH8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 21:34           ` Andrea Righi
2009-05-06 21:34         ` Andrea Righi
2009-05-06 21:52           ` Vivek Goyal
2009-05-06 21:52             ` Vivek Goyal
2009-05-06 22:35             ` Andrea Righi
2009-05-07  1:48               ` Ryo Tsuruta
2009-05-07  1:48               ` Ryo Tsuruta
     [not found]             ` <20090506215235.GJ8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-06 22:35               ` Andrea Righi
2009-05-07  9:04               ` Andrea Righi
2009-05-07  9:04             ` Andrea Righi
2009-05-07 12:22               ` Andrea Righi
2009-05-07 12:22               ` Andrea Righi
2009-05-07 14:11               ` Vivek Goyal
2009-05-07 14:11               ` Vivek Goyal
     [not found]                 ` <20090507141126.GA9463-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-07 14:45                   ` Vivek Goyal
2009-05-07 14:45                     ` Vivek Goyal
     [not found]                     ` <20090507144501.GB9463-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-07 15:36                       ` Vivek Goyal
2009-05-07 15:36                         ` Vivek Goyal
     [not found]                         ` <20090507153642.GC9463-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-07 15:42                           ` Vivek Goyal
2009-05-07 15:42                             ` Vivek Goyal
2009-05-07 22:19                           ` Andrea Righi
2009-05-07 22:19                         ` Andrea Righi
2009-05-08 18:09                           ` Vivek Goyal
2009-05-08 20:05                             ` Andrea Righi
2009-05-08 21:56                               ` Vivek Goyal
2009-05-08 21:56                                 ` Vivek Goyal
2009-05-09  9:22                                 ` Peter Zijlstra
2009-05-14 10:31                                 ` Andrea Righi
     [not found]                                 ` <20090508215618.GJ7293-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-09  9:22                                   ` Peter Zijlstra
2009-05-14 10:31                                   ` Andrea Righi
2009-05-14 16:43                                   ` Dhaval Giani
2009-05-14 16:43                                     ` Dhaval Giani
     [not found]                             ` <20090508180951.GG7293-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-08 20:05                               ` Andrea Righi
2009-05-08 18:09                           ` Vivek Goyal
2009-05-07 22:40                       ` Andrea Righi
2009-05-07 22:40                     ` Andrea Righi
2009-05-07  0:18       ` Ryo Tsuruta
     [not found]         ` <20090507.091858.226775723.ryov-jCdQPDEk3idL9jVzuh4AOg@public.gmane.org>
2009-05-07  1:25           ` Vivek Goyal
2009-05-07  1:25             ` Vivek Goyal
     [not found]             ` <20090507012559.GC4187-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-11 11:23               ` Ryo Tsuruta
2009-05-11 11:23             ` Ryo Tsuruta
     [not found]               ` <20090511.202309.112614168.ryov-jCdQPDEk3idL9jVzuh4AOg@public.gmane.org>
2009-05-11 12:49                 ` Vivek Goyal
2009-05-11 12:49                   ` Vivek Goyal
2009-05-08 14:24           ` Rik van Riel
2009-05-08 14:24         ` Rik van Riel
     [not found]           ` <4A0440B2.7040300-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-11 10:11             ` Ryo Tsuruta
2009-05-11 10:11           ` Ryo Tsuruta
     [not found]     ` <20090505132441.1705bfad.akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>
2009-05-05 22:20       ` Peter Zijlstra
2009-05-06  2:33       ` Vivek Goyal
2009-05-06  3:41       ` Balbir Singh
2009-05-06  3:41     ` Balbir Singh
2009-05-06 13:28       ` Vivek Goyal
2009-05-06 13:28         ` Vivek Goyal
     [not found]       ` <20090506034118.GC4416-SINUvgVNF2CyUtPGxGje5AC/G2K4zDHf@public.gmane.org>
2009-05-06 13:28         ` Vivek Goyal
2009-05-06  8:11   ` Gui Jianfeng
2009-05-08  9:45   ` [PATCH] io-controller: Add io group reference handling for request Gui Jianfeng
2009-05-13  2:00   ` [PATCH] IO Controller: Add per-device weight and ioprio_class handling Gui Jianfeng
2009-05-06  8:11 ` IO scheduler based IO Controller V2 Gui Jianfeng
     [not found]   ` <4A014619.1040000-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-06 16:10     ` Vivek Goyal
2009-05-06 16:10       ` Vivek Goyal
2009-05-07  5:36       ` Li Zefan
     [not found]         ` <4A027348.6000808-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-08 13:37           ` Vivek Goyal
2009-05-08 13:37             ` Vivek Goyal
2009-05-11  2:59             ` Gui Jianfeng
     [not found]             ` <20090508133740.GD7293-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-11  2:59               ` Gui Jianfeng
2009-05-07  5:47       ` Gui Jianfeng
     [not found]       ` <20090506161012.GC8180-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-07  5:36         ` Li Zefan
2009-05-07  5:47         ` Gui Jianfeng
2009-05-08  9:45 ` [PATCH] io-controller: Add io group reference handling for request Gui Jianfeng
     [not found]   ` <4A03FF3C.4020506-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-08 13:57     ` Vivek Goyal
2009-05-08 13:57       ` Vivek Goyal
     [not found]       ` <20090508135724.GE7293-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-08 17:41         ` Nauman Rafique
2009-05-08 17:41       ` Nauman Rafique
2009-05-08 17:41         ` Nauman Rafique
2009-05-08 18:56         ` Vivek Goyal
     [not found]           ` <20090508185644.GH7293-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-08 19:06             ` Nauman Rafique
2009-05-08 19:06           ` Nauman Rafique
2009-05-08 19:06             ` Nauman Rafique
2009-05-11  1:33         ` Gui Jianfeng
2009-05-11 15:41           ` Vivek Goyal
     [not found]             ` <20090511154127.GD6036-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-15  5:15               ` Gui Jianfeng
2009-05-15  5:15                 ` Gui Jianfeng
2009-05-15  7:48                 ` Andrea Righi
2009-05-15  8:16                   ` Gui Jianfeng
2009-05-15  8:16                   ` Gui Jianfeng
     [not found]                     ` <4A0D24E6.6010807-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-15 14:09                       ` Vivek Goyal
2009-05-15 14:09                         ` Vivek Goyal
2009-05-15 14:06                   ` Vivek Goyal
2009-05-15 14:06                   ` Vivek Goyal
2009-05-17 10:26                     ` Andrea Righi
2009-05-18 14:01                       ` Vivek Goyal
2009-05-18 14:01                         ` Vivek Goyal
2009-05-18 14:39                         ` Andrea Righi
2009-05-26 11:34                           ` Ryo Tsuruta
2009-05-26 11:34                           ` Ryo Tsuruta
2009-05-27  6:56                             ` Ryo Tsuruta
2009-05-27  6:56                               ` Ryo Tsuruta
2009-05-27  8:17                               ` Andrea Righi
2009-05-27  8:17                                 ` Andrea Righi
2009-05-27 11:53                                 ` Ryo Tsuruta
2009-05-27 11:53                                 ` Ryo Tsuruta
2009-05-27 17:32                               ` Vivek Goyal
2009-05-27 17:32                                 ` Vivek Goyal
     [not found]                               ` <20090527.155631.226800550.ryov-jCdQPDEk3idL9jVzuh4AOg@public.gmane.org>
2009-05-27  8:17                                 ` Andrea Righi
2009-05-27 17:32                                 ` Vivek Goyal
     [not found]                             ` <20090526.203424.39179999.ryov-jCdQPDEk3idL9jVzuh4AOg@public.gmane.org>
2009-05-27  6:56                               ` Ryo Tsuruta
2009-05-19 12:18                         ` Ryo Tsuruta
     [not found]                         ` <20090518140114.GB27080-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-18 14:39                           ` Andrea Righi
2009-05-19 12:18                           ` Ryo Tsuruta
     [not found]                     ` <20090515140643.GB19350-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-17 10:26                       ` Andrea Righi
     [not found]                 ` <4A0CFA6C.3080609-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-15  7:48                   ` Andrea Righi
2009-05-15  7:40               ` Gui Jianfeng
2009-05-15  7:40                 ` Gui Jianfeng
2009-05-15 14:01                 ` Vivek Goyal
     [not found]                 ` <4A0D1C55.9040700-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-15 14:01                   ` Vivek Goyal
     [not found]           ` <4A078051.5060702-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-11 15:41             ` Vivek Goyal
     [not found]         ` <e98e18940905081041r386e52a5q5a2b1f13f1e8c634-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2009-05-08 18:56           ` Vivek Goyal
2009-05-11  1:33           ` Gui Jianfeng
2009-05-13  2:00 ` [PATCH] IO Controller: Add per-device weight and ioprio_class handling Gui Jianfeng
2009-05-13 14:44   ` Vivek Goyal
     [not found]     ` <20090513144432.GA7696-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  0:59       ` Gui Jianfeng
2009-05-14  0:59     ` Gui Jianfeng
2009-05-13 15:29   ` Vivek Goyal
2009-05-14  1:02     ` Gui Jianfeng
     [not found]     ` <20090513152909.GD7696-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  1:02       ` Gui Jianfeng
2009-05-13 15:59   ` Vivek Goyal
2009-05-14  1:51     ` Gui Jianfeng
     [not found]     ` <20090513155900.GA15623-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  1:51       ` Gui Jianfeng
2009-05-14  2:25       ` Gui Jianfeng
2009-05-14  2:25     ` Gui Jianfeng
     [not found]   ` <4A0A29B5.7030109-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-13 14:44     ` Vivek Goyal
2009-05-13 15:29     ` Vivek Goyal
2009-05-13 15:59     ` Vivek Goyal
2009-05-13 17:17     ` Vivek Goyal
2009-05-13 19:09     ` Vivek Goyal
2009-05-13 17:17   ` Vivek Goyal
     [not found]     ` <20090513171734.GA18371-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  1:24       ` Gui Jianfeng
2009-05-14  1:24     ` Gui Jianfeng
2009-05-13 19:09   ` Vivek Goyal
2009-05-14  1:35     ` Gui Jianfeng
     [not found]     ` <20090513190929.GB18371-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2009-05-14  1:35       ` Gui Jianfeng
2009-05-14  7:26       ` Gui Jianfeng
2009-05-14  7:26     ` Gui Jianfeng
2009-05-14 15:15       ` Vivek Goyal
2009-05-18 22:33       ` IKEDA, Munehiro
2009-05-20  1:44         ` Gui Jianfeng
     [not found]           ` <4A136090.5090705-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-20 15:41             ` IKEDA, Munehiro
2009-05-20 15:41               ` IKEDA, Munehiro
     [not found]         ` <4A11E244.2000305-MDRzhb/z0dd8UrSeD/g0lQ@public.gmane.org>
2009-05-20  1:44           ` Gui Jianfeng
     [not found]       ` <4A0BC7AB.8030703-BthXqXjhjHXQFUHtdCDX3A@public.gmane.org>
2009-05-14 15:15         ` Vivek Goyal
2009-05-18 22:33         ` IKEDA, Munehiro

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1241553525-28095-18-git-send-email-vgoyal@redhat.com \
    --to=vgoyal@redhat.com \
    --cc=agk@redhat.com \
    --cc=akpm@linux-foundation.org \
    --cc=balbir@linux.vnet.ibm.com \
    --cc=containers@lists.linux-foundation.org \
    --cc=dhaval@linux.vnet.ibm.com \
    --cc=dm-devel@redhat.com \
    --cc=dpshah@google.com \
    --cc=fchecconi@gmail.com \
    --cc=fernando@oss.ntt.co.jp \
    --cc=guijianfeng@cn.fujitsu.com \
    --cc=jens.axboe@oracle.com \
    --cc=jmoyer@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lizf@cn.fujitsu.com \
    --cc=m-ikeda@ds.jp.nec.com \
    --cc=mikew@google.com \
    --cc=nauman@google.com \
    --cc=paolo.valente@unimore.it \
    --cc=righi.andrea@gmail.com \
    --cc=ryov@valinux.co.jp \
    --cc=s-uchida@ap.jp.nec.com \
    --cc=snitzer@redhat.com \
    --cc=taka@valinux.co.jp \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.