* [PATCH 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-08 4:55 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-08 4:55 UTC (permalink / raw)
To: Jens Axboe, Alasdair G Kergon
Cc: Kiyoshi Ueda, Hannes Reinecke, dm-devel, linux-kernel
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue at device
creation time, but decide whether it will use the elevator and requests only
after the first successful table load. Only request-based dm-devices use the
elevator and requests. Without this, one either needs to initialize and then
free the mempool and elevator for a bio-based dm-device, or leave them
allocated, as is currently done.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435..5db0772 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
/*
* if caller didn't supply a lock, they get per-queue locking with
* our embedded lock
@@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5db..9552961 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-08 4:55 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-08 4:55 UTC (permalink / raw)
To: Jens Axboe, Alasdair G Kergon; +Cc: Kiyoshi Ueda, dm-devel, linux-kernel
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue at device
creation time, but decide whether it will use the elevator and requests only
after the first successful table load. Only request-based dm-devices use the
elevator and requests. Without this, one either needs to initialize and then
free the mempool and elevator for a bio-based dm-device, or leave them
allocated, as is currently done.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435..5db0772 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
/*
* if caller didn't supply a lock, they get per-queue locking with
* our embedded lock
@@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5db..9552961 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] Allow delaying initialization of queue after allocation
2009-08-08 4:55 ` Nikanth Karthikesan
@ 2009-08-08 15:42 ` Mike Snitzer
-1 siblings, 0 replies; 11+ messages in thread
From: Mike Snitzer @ 2009-08-08 15:42 UTC (permalink / raw)
To: Nikanth Karthikesan
Cc: Jens Axboe, Alasdair G Kergon, Kiyoshi Ueda, dm-devel, linux-kernel
[-- Attachment #1: Type: text/plain, Size: 3187 bytes --]
On Sat, Aug 08 2009 at 12:55am -0400,
Nikanth Karthikesan <knikanth@suse.de> wrote:
> Export a way to delay initializing a request_queue after allocating it. This
> is needed by device-mapper devices, as they create the queue on device
> creation time, but they decide whether it would use the elevator and requests
> only after first successful table load. Only request-based dm-devices use the
> elevator and requests. Without this either one needs to initialize and free
> the mempool and elevator, if it was a bio-based dm-device or leave it
> allocated, as it is currently done.
>
> Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
This patch needed to be refreshed to account for the changes from this
recent commit: a4e7d46407d73f35d217013b363b79a8f8eafcaa
I've attached a refreshed patch.
Though I still have questions/feedback below.
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 4b45435..5db0772 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
> if (!q)
> return NULL;
>
> - q->node = node_id;
> - if (blk_init_free_list(q)) {
> + if (blk_init_allocated_queue(q, rfn, lock)) {
> + blk_put_queue(q);
> kmem_cache_free(blk_requestq_cachep, q);
> return NULL;
> }
>
> + return q;
> +}
> +EXPORT_SYMBOL(blk_init_queue_node);
> +
> +int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
> + spinlock_t *lock)
> +{
> + int err = 0;
> +
> + err = blk_init_free_list(q);
> + if (err)
> + goto out;
> +
> /*
> * if caller didn't supply a lock, they get per-queue locking with
> * our embedded lock
> @@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
> /*
> * all done
> */
> - if (!elevator_init(q, NULL)) {
> - blk_queue_congestion_threshold(q);
> - return q;
> - }
> + err = elevator_init(q, NULL);
> + if (err)
> + goto free_and_out;
>
> - blk_put_queue(q);
> - return NULL;
> + blk_queue_congestion_threshold(q);
> +
> + return 0;
> +
> +free_and_out:
> + mempool_destroy(q->rq.rq_pool);
> +out:
> + return err;
> }
> -EXPORT_SYMBOL(blk_init_queue_node);
> +EXPORT_SYMBOL(blk_init_allocated_queue);
>
> int blk_get_queue(struct request_queue *q)
> {
In the previous code blk_init_queue_node() only called blk_put_queue()
iff elevator_init() failed.
Why is blk_init_queue_node() now always calling blk_put_queue() on an
error from blk_init_allocated_queue()? It could be that
blk_init_free_list() was what failed and not elevator_init().
I'd imagine it is because some callers of blk_init_allocated_queue(),
e.g. DM, must not have the queue's refcount dropped on failure? A
comment on _why_ would really help set the caller's expectations. Maybe
at the top of blk_init_allocated_queue()? E.g.:
"It is up to the caller to manage the allocated queue's lifecycle
relative to blk_init_allocated_queue() failure". I guess that is
obvious after having reviewed this but...
Also, a comment that blk_init_allocated_queue()'s mempool_destroy() is
to "cleanup the mempool allocated via blk_init_free_list()" would help.
Thanks,
Mike
[-- Attachment #2: dm1.patch --]
[-- Type: text/plain, Size: 2242 bytes --]
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
Index: linux-2.6/block/blk-core.c
===================================================================
--- linux-2.6.orig/block/blk-core.c
+++ linux-2.6/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_no
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -604,15 +606,20 @@ int blk_init_allocated_queue(struct requ
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
Index: linux-2.6/include/linux/blkdev.h
===================================================================
--- linux-2.6.orig/include/linux/blkdev.h
+++ linux-2.6/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct reque
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-08 15:42 ` Mike Snitzer
0 siblings, 0 replies; 11+ messages in thread
From: Mike Snitzer @ 2009-08-08 15:42 UTC (permalink / raw)
To: Nikanth Karthikesan
Cc: Kiyoshi Ueda, dm-devel, linux-kernel, Alasdair G Kergon, Jens Axboe
[-- Attachment #1: Type: text/plain, Size: 3187 bytes --]
On Sat, Aug 08 2009 at 12:55am -0400,
Nikanth Karthikesan <knikanth@suse.de> wrote:
> Export a way to delay initializing a request_queue after allocating it. This
> is needed by device-mapper devices, as they create the queue on device
> creation time, but they decide whether it would use the elevator and requests
> only after first successful table load. Only request-based dm-devices use the
> elevator and requests. Without this either one needs to initialize and free
> the mempool and elevator, if it was a bio-based dm-device or leave it
> allocated, as it is currently done.
>
> Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
This patch needed to be refreshed to account for the changes from this
recent commit: a4e7d46407d73f35d217013b363b79a8f8eafcaa
I've attached a refreshed patch.
Though I still have questions/feedback below.
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 4b45435..5db0772 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
> if (!q)
> return NULL;
>
> - q->node = node_id;
> - if (blk_init_free_list(q)) {
> + if (blk_init_allocated_queue(q, rfn, lock)) {
> + blk_put_queue(q);
> kmem_cache_free(blk_requestq_cachep, q);
> return NULL;
> }
>
> + return q;
> +}
> +EXPORT_SYMBOL(blk_init_queue_node);
> +
> +int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
> + spinlock_t *lock)
> +{
> + int err = 0;
> +
> + err = blk_init_free_list(q);
> + if (err)
> + goto out;
> +
> /*
> * if caller didn't supply a lock, they get per-queue locking with
> * our embedded lock
> @@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
> /*
> * all done
> */
> - if (!elevator_init(q, NULL)) {
> - blk_queue_congestion_threshold(q);
> - return q;
> - }
> + err = elevator_init(q, NULL);
> + if (err)
> + goto free_and_out;
>
> - blk_put_queue(q);
> - return NULL;
> + blk_queue_congestion_threshold(q);
> +
> + return 0;
> +
> +free_and_out:
> + mempool_destroy(q->rq.rq_pool);
> +out:
> + return err;
> }
> -EXPORT_SYMBOL(blk_init_queue_node);
> +EXPORT_SYMBOL(blk_init_allocated_queue);
>
> int blk_get_queue(struct request_queue *q)
> {
In the previous code blk_init_queue_node() only called blk_put_queue()
iff elevator_init() failed.
Why is blk_init_queue_node() now always calling blk_put_queue() on an
error from blk_init_allocated_queue()? It could be that
blk_init_free_list() was what failed and not elevator_init().
I'd imagine it is because some callers of blk_init_allocated_queue(),
e.g. DM, must not have the queue's refcount dropped on failure? A
comment on _why_ would really help set the caller's expectations. Maybe
at the top of blk_init_allocated_queue()? E.g.:
"It is up to the caller to manage the allocated queue's lifecycle
relative to blk_init_allocated_queue() failure". I guess that is
obvious after having reviewed this but...
Also, a comment that blk_init_allocated_queue()'s mempool_destroy() is
to "cleanup the mempool allocated via blk_init_free_list()" would help.
Thanks,
Mike
[-- Attachment #2: dm1.patch --]
[-- Type: text/plain, Size: 2242 bytes --]
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
Index: linux-2.6/block/blk-core.c
===================================================================
--- linux-2.6.orig/block/blk-core.c
+++ linux-2.6/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_no
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -604,15 +606,20 @@ int blk_init_allocated_queue(struct requ
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
Index: linux-2.6/include/linux/blkdev.h
===================================================================
--- linux-2.6.orig/include/linux/blkdev.h
+++ linux-2.6/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct reque
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
[-- Attachment #3: Type: text/plain, Size: 0 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] Allow delaying initialization of queue after allocation
2009-08-08 15:42 ` Mike Snitzer
(?)
@ 2009-08-08 16:42 ` Mike Snitzer
-1 siblings, 0 replies; 11+ messages in thread
From: Mike Snitzer @ 2009-08-08 16:42 UTC (permalink / raw)
To: Nikanth Karthikesan
Cc: Kiyoshi Ueda, dm-devel, linux-kernel, Alasdair G Kergon, Jens Axboe
On Sat, Aug 08 2009 at 11:42am -0400,
Mike Snitzer <snitzer@redhat.com> wrote:
> On Sat, Aug 08 2009 at 12:55am -0400,
> Nikanth Karthikesan <knikanth@suse.de> wrote:
>
> > Export a way to delay initializing a request_queue after allocating it. This
> > is needed by device-mapper devices, as they create the queue on device
> > creation time, but they decide whether it would use the elevator and requests
> > only after first successful table load. Only request-based dm-devices use the
> > elevator and requests. Without this either one needs to initialize and free
> > the mempool and elevator, if it was a bio-based dm-device or leave it
> > allocated, as it is currently done.
> >
> > Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
>
> This patch needed to be refreshed to account for the changes from this
> recent commit: a4e7d46407d73f35d217013b363b79a8f8eafcaa
>
> I've attached a refreshed patch.
Err, I dropped a hunk in the process... here is the refreshed patch
inlined. Figured I'd save you the busy-work for when you get V2
together.
> Though I still have questions/feedback below.
Hopefully V2 will address my questions/feedback.
Regards,
Mike
---
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
Index: linux-2.6/block/blk-core.c
===================================================================
--- linux-2.6.orig/block/blk-core.c
+++ linux-2.6/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_no
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -591,15 +606,20 @@ blk_init_queue_node(request_fn_proc *rfn
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
Index: linux-2.6/include/linux/blkdev.h
===================================================================
--- linux-2.6.orig/include/linux/blkdev.h
+++ linux-2.6/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct reque
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] Allow delaying initialization of queue after allocation
2009-08-08 15:42 ` Mike Snitzer
@ 2009-08-10 10:21 ` Nikanth Karthikesan
-1 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-10 10:21 UTC (permalink / raw)
To: Mike Snitzer
Cc: Jens Axboe, Alasdair G Kergon, Kiyoshi Ueda, dm-devel, linux-kernel
On Saturday 08 August 2009 21:12:40 Mike Snitzer wrote:
> On Sat, Aug 08 2009 at 12:55am -0400,
>
> Nikanth Karthikesan <knikanth@suse.de> wrote:
> > Export a way to delay initializing a request_queue after allocating it.
> > This is needed by device-mapper devices, as they create the queue on
> > device creation time, but they decide whether it would use the elevator
> > and requests only after first successful table load. Only request-based
> > dm-devices use the elevator and requests. Without this either one needs
> > to initialize and free the mempool and elevator, if it was a bio-based
> > dm-device or leave it allocated, as it is currently done.
> >
> > Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
>
> This patch needed to be refreshed to account for the changes from this
> recent commit: a4e7d46407d73f35d217013b363b79a8f8eafcaa
>
> I've attached a refreshed patch.
>
Thanks.
> Though I still have questions/feedback below.
>
> > diff --git a/block/blk-core.c b/block/blk-core.c
> > index 4b45435..5db0772 100644
> > --- a/block/blk-core.c
> > +++ b/block/blk-core.c
> > @@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn,
> > spinlock_t *lock, int node_id) if (!q)
> > return NULL;
> >
> > - q->node = node_id;
> > - if (blk_init_free_list(q)) {
> > + if (blk_init_allocated_queue(q, rfn, lock)) {
> > + blk_put_queue(q);
> > kmem_cache_free(blk_requestq_cachep, q);
> > return NULL;
> > }
> >
> > + return q;
> > +}
> > +EXPORT_SYMBOL(blk_init_queue_node);
> > +
> > +int blk_init_allocated_queue(struct request_queue *q, request_fn_proc
> > *rfn, + spinlock_t *lock)
> > +{
> > + int err = 0;
> > +
> > + err = blk_init_free_list(q);
> > + if (err)
> > + goto out;
> > +
> > /*
> > * if caller didn't supply a lock, they get per-queue locking with
> > * our embedded lock
> > @@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn,
> > spinlock_t *lock, int node_id) /*
> > * all done
> > */
> > - if (!elevator_init(q, NULL)) {
> > - blk_queue_congestion_threshold(q);
> > - return q;
> > - }
> > + err = elevator_init(q, NULL);
> > + if (err)
> > + goto free_and_out;
> >
> > - blk_put_queue(q);
> > - return NULL;
> > + blk_queue_congestion_threshold(q);
> > +
> > + return 0;
> > +
> > +free_and_out:
> > + mempool_destroy(q->rq.rq_pool);
> > +out:
> > + return err;
> > }
> > -EXPORT_SYMBOL(blk_init_queue_node);
> > +EXPORT_SYMBOL(blk_init_allocated_queue);
> >
> > int blk_get_queue(struct request_queue *q)
> > {
>
> In the previous code blk_init_queue_node() only called blk_put_queue()
> iff elevator_init() failed.
>
> Why is blk_init_queue_node() now always calling blk_put_queue() on an
> error from blk_init_allocated_queue()? It could be that
> blk_init_free_list() was what failed and not elevator_init().
>
I think it was a bug not to call blk_put_queue() when
blk_init_free_list() failed; that is fixed now.
> I'd imagine it is because some callers of blk_init_allocated_queue(),
> e.g. DM, must not have the queue's refcount dropped on failure? A
> comment on _why_ would really help set the caller's expectations. Maybe
> at the top of blk_init_allocated_queue()? E.g.:
>
> "It is up to the caller to manage the allocated queue's lifecycle
> relative to blk_init_allocated_queue() failure". I guess that is
> obvious after having reviewed this but...
>
> Also, a comment that blk_init_allocated_queue()'s mempool_destroy() is
> to "cleanup the mempool allocated via blk_init_free_list()" would help.
>
Will add the comment when I resend the patch.
Thanks for reviewing.
Thanks
Nikanth
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-10 10:21 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-10 10:21 UTC (permalink / raw)
To: Mike Snitzer
Cc: Kiyoshi Ueda, dm-devel, linux-kernel, Alasdair G Kergon, Jens Axboe
On Saturday 08 August 2009 21:12:40 Mike Snitzer wrote:
> On Sat, Aug 08 2009 at 12:55am -0400,
>
> Nikanth Karthikesan <knikanth@suse.de> wrote:
> > Export a way to delay initializing a request_queue after allocating it.
> > This is needed by device-mapper devices, as they create the queue on
> > device creation time, but they decide whether it would use the elevator
> > and requests only after first successful table load. Only request-based
> > dm-devices use the elevator and requests. Without this either one needs
> > to initialize and free the mempool and elevator, if it was a bio-based
> > dm-device or leave it allocated, as it is currently done.
> >
> > Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
>
> This patch needed to be refreshed to account for the changes from this
> recent commit: a4e7d46407d73f35d217013b363b79a8f8eafcaa
>
> I've attached a refreshed patch.
>
Thanks.
> Though I still have questions/feedback below.
>
> > diff --git a/block/blk-core.c b/block/blk-core.c
> > index 4b45435..5db0772 100644
> > --- a/block/blk-core.c
> > +++ b/block/blk-core.c
> > @@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn,
> > spinlock_t *lock, int node_id) if (!q)
> > return NULL;
> >
> > - q->node = node_id;
> > - if (blk_init_free_list(q)) {
> > + if (blk_init_allocated_queue(q, rfn, lock)) {
> > + blk_put_queue(q);
> > kmem_cache_free(blk_requestq_cachep, q);
> > return NULL;
> > }
> >
> > + return q;
> > +}
> > +EXPORT_SYMBOL(blk_init_queue_node);
> > +
> > +int blk_init_allocated_queue(struct request_queue *q, request_fn_proc
> > *rfn, + spinlock_t *lock)
> > +{
> > + int err = 0;
> > +
> > + err = blk_init_free_list(q);
> > + if (err)
> > + goto out;
> > +
> > /*
> > * if caller didn't supply a lock, they get per-queue locking with
> > * our embedded lock
> > @@ -598,15 +613,20 @@ blk_init_queue_node(request_fn_proc *rfn,
> > spinlock_t *lock, int node_id) /*
> > * all done
> > */
> > - if (!elevator_init(q, NULL)) {
> > - blk_queue_congestion_threshold(q);
> > - return q;
> > - }
> > + err = elevator_init(q, NULL);
> > + if (err)
> > + goto free_and_out;
> >
> > - blk_put_queue(q);
> > - return NULL;
> > + blk_queue_congestion_threshold(q);
> > +
> > + return 0;
> > +
> > +free_and_out:
> > + mempool_destroy(q->rq.rq_pool);
> > +out:
> > + return err;
> > }
> > -EXPORT_SYMBOL(blk_init_queue_node);
> > +EXPORT_SYMBOL(blk_init_allocated_queue);
> >
> > int blk_get_queue(struct request_queue *q)
> > {
>
> In the previous code blk_init_queue_node() only called blk_put_queue()
> iff elevator_init() failed.
>
> Why is blk_init_queue_node() now always calling blk_put_queue() on an
> error from blk_init_allocated_queue()? It could be that
> blk_init_free_list() was what failed and not elevator_init().
>
I think it was a bug not to call blk_put_queue() when
blk_init_free_list() failed; that is fixed now.
> I'd imagine it is because some callers of blk_init_allocated_queue(),
> e.g. DM, must not have the queue's refcount dropped on failure? A
> comment on _why_ would really help set the caller's expectations. Maybe
> at the top of blk_init_allocated_queue()? E.g.:
>
> "It is up to the caller to manage the allocated queue's lifecycle
> relative to blk_init_allocated_queue() failure". I guess that is
> obvious after having reviewed this but...
>
> Also, a comment that blk_init_allocated_queue()'s mempool_destroy() is
> to "cleanup the mempool allocated via blk_init_free_list()" would help.
>
Will add the comment when I resend the patch.
Thanks for reviewing.
Thanks
Nikanth
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH-v2 1/2] Allow delaying initialization of queue after allocation
2009-08-10 10:21 ` Nikanth Karthikesan
@ 2009-08-10 10:48 ` Nikanth Karthikesan
-1 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-10 10:48 UTC (permalink / raw)
To: Jens Axboe
Cc: Mike Snitzer, Alasdair G Kergon, Kiyoshi Ueda, dm-devel,
linux-kernel, Hannes Reinecke
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue at device
creation time, but decide whether it will use the elevator and requests only
after the first successful table load. Only request-based dm-devices use the
elevator and requests. Without this, one either needs to initialize and then
free the mempool and elevator for a bio-based dm-device, or leave them
allocated, as is currently done.
This slightly changes the behaviour of blk_init_queue_node() such that
blk_put_queue() will be called even if blk_init_free_list() fails.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a7..8b05b3b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -591,15 +606,23 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ /*
+ * Cleanup mempool allocated by blk_init_free_list
+ */
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69103e0..4a26fc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH-v2 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-10 10:48 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-10 10:48 UTC (permalink / raw)
To: Jens Axboe
Cc: Kiyoshi Ueda, Mike Snitzer, linux-kernel, dm-devel, Alasdair G Kergon
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
This slightly changes the behaviour of blk_init_queue_node() such that
blk_put_queue() would be called, even if blk_init_free_list() fails.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a7..8b05b3b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -591,15 +606,23 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ /*
+ * Cleanup mempool allocated by blk_init_free_list
+ */
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69103e0..4a26fc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH-v3 1/2] Allow delaying initialization of queue after allocation
2009-08-10 10:48 ` Nikanth Karthikesan
@ 2009-08-11 9:32 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-11 9:32 UTC (permalink / raw)
To: Jens Axboe
Cc: Mike Snitzer, Alasdair G Kergon, Kiyoshi Ueda, dm-devel,
linux-kernel, Hannes Reinecke
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
This slightly changes the behaviour of blk_init_queue_node() such that
blk_put_queue() would be called, even if blk_init_free_list() fails.
Also export elv_register_queue() to modules.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a7..8b05b3b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -591,15 +606,23 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ /*
+ * Cleanup mempool allocated by blk_init_free_list
+ */
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/block/elevator.c b/block/elevator.c
index 2d511f9..0827cd3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -930,6 +930,7 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
+EXPORT_SYMBOL(elv_register_queue);
static void __elv_unregister_queue(struct elevator_queue *e)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69103e0..4a26fc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH-v3 1/2] Allow delaying initialization of queue after allocation
@ 2009-08-11 9:32 ` Nikanth Karthikesan
0 siblings, 0 replies; 11+ messages in thread
From: Nikanth Karthikesan @ 2009-08-11 9:32 UTC (permalink / raw)
To: Jens Axboe
Cc: Kiyoshi Ueda, Mike Snitzer, linux-kernel, dm-devel, Alasdair G Kergon
Export a way to delay initializing a request_queue after allocating it. This
is needed by device-mapper devices, as they create the queue on device
creation time, but they decide whether it would use the elevator and requests
only after first successful table load. Only request-based dm-devices use the
elevator and requests. Without this either one needs to initialize and free
the mempool and elevator, if it was a bio-based dm-device or leave it
allocated, as it is currently done.
This slightly changes the behaviour of block_init_queue_node() such that
blk_put_queue() would be called, even if blk_init_free_list() fails.
Also export elv_register_queue() to modules.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
---
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a7..8b05b3b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -495,6 +495,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ q->node = node_id;
+
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
@@ -569,12 +571,25 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!q)
return NULL;
- q->node = node_id;
- if (blk_init_free_list(q)) {
+ if (blk_init_allocated_queue(q, rfn, lock)) {
+ blk_put_queue(q);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ int err = 0;
+
+ err = blk_init_free_list(q);
+ if (err)
+ goto out;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -591,15 +606,23 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
/*
* all done
*/
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ err = elevator_init(q, NULL);
+ if (err)
+ goto free_and_out;
- blk_put_queue(q);
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ return 0;
+
+free_and_out:
+ /*
+ * Cleanup mempool allocated by blk_init_free_list
+ */
+ mempool_destroy(q->rq.rq_pool);
+out:
+ return err;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/block/elevator.c b/block/elevator.c
index 2d511f9..0827cd3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -930,6 +930,7 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
+EXPORT_SYMBOL(elv_register_queue);
static void __elv_unregister_queue(struct elevator_queue *e)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69103e0..4a26fc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -901,6 +901,8 @@ extern void blk_abort_queue(struct request_queue *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *q,
+ request_fn_proc *rfn, spinlock_t *lock);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
^ permalink raw reply related [flat|nested] 11+ messages in thread
end of thread, other threads:[~2009-08-11 11:57 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-08-08 4:55 [PATCH 1/2] Allow delaying initialization of queue after allocation Nikanth Karthikesan
2009-08-08 4:55 ` Nikanth Karthikesan
2009-08-08 15:42 ` Mike Snitzer
2009-08-08 15:42 ` Mike Snitzer
2009-08-08 16:42 ` Mike Snitzer
2009-08-10 10:21 ` Nikanth Karthikesan
2009-08-10 10:21 ` Nikanth Karthikesan
2009-08-10 10:48 ` [PATCH-v2 " Nikanth Karthikesan
2009-08-10 10:48 ` Nikanth Karthikesan
2009-08-11 9:32 ` [PATCH-v3 " Nikanth Karthikesan
2009-08-11 9:32 ` Nikanth Karthikesan
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.