All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] dm-mpath: Work with blk multi-queue drivers
@ 2014-09-23 17:03 Keith Busch
  2014-09-24  9:02 ` Hannes Reinecke
  2014-09-24 14:52 ` Christoph Hellwig
  0 siblings, 2 replies; 14+ messages in thread
From: Keith Busch @ 2014-09-23 17:03 UTC (permalink / raw)
  To: dm-devel; +Cc: Keith Busch, Mike Snitzer

I'm working with multipathing nvme devices using the blk-mq version of
the nvme driver, but dm-mpath only works with the older request based
drivers. This patch proposes to enable dm-mpath to work with both types
of request queues and is successful with my dual-ported nvme drives.

I think there may still be fix ups to do around submission side error
handling, but I think it's at a decent stopping point to solicit feedback
before I pursue taking it further. I hear there may be some resistance
to add blk-mq support to dm-mpath anyway, but it seems too easy to add
support to not at least try. :)

To work, this has dm allocate requests from the request_queue for
the device-mapper type rather than allocate one on its own, so the
cloned request is properly allocated and initialized for the device's
request_queue. The original request's 'special' now points to the
dm_rq_target_io rather than at the cloned request because the clone
is allocated later by the block layer rather than by dm, and then all
the other back referencing to the original seems to work out. The block
layer then inserts the cloned request using the appropriate function for
the request_queue type rather than just calling q->request_fn().

Compile tested on 3.17-rc6; runtime tested on Matias Bjorling's
linux-collab nvmemq_review using 3.16.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
---
 block/blk-core.c              |    7 ++-
 block/blk-mq.c                |    1 +
 drivers/md/dm-mpath.c         |   15 ++++---
 drivers/md/dm-target.c        |    4 +-
 drivers/md/dm.c               |   96 ++++++++++++++++++-----------------------
 include/linux/device-mapper.h |    4 +-
 6 files changed, 62 insertions(+), 65 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index bf930f4..4c5952b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2031,6 +2031,11 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
 		return -EIO;
 
+	if (q->mq_ops) {
+		blk_mq_insert_request(rq, false, true, true);
+		return 0;
+	}
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2928,8 +2933,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 	if (!bs)
 		bs = fs_bio_set;
 
-	blk_rq_init(NULL, rq);
-
 	__rq_for_each_bio(bio_src, rq_src) {
 		bio = bio_clone_bioset(bio_src, gfp_mask, bs);
 		if (!bio)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 383ea0c..eb53d1c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -172,6 +172,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
 #endif
+	rq->bio = NULL;
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 833d7e7..efdc06b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -11,6 +11,7 @@
 #include "dm-path-selector.h"
 #include "dm-uevent.h"
 
+#include <linux/blkdev.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/mempool.h>
@@ -376,12 +377,12 @@ static int __must_push_back(struct multipath *m)
 /*
  * Map cloned requests
  */
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
+static int multipath_map(struct dm_target *ti, struct request *rq,
+		struct request **clone, union map_info *map_context)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = blk_rq_bytes(clone);
+	size_t nr_bytes = blk_rq_bytes(rq);
 	unsigned long flags;
 	struct pgpath *pgpath;
 	struct block_device *bdev;
@@ -410,9 +411,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 		goto out_unlock;
 
 	bdev = pgpath->path.dev->bdev;
-	clone->q = bdev_get_queue(bdev);
-	clone->rq_disk = bdev->bd_disk;
-	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	*clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq),
+							GFP_KERNEL);
+	if (!(*clone))
+		goto out_unlock;
+	(*clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 	mpio = map_context->ptr;
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 242e3ce..2bcbf6e 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,8 +131,8 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
 	return -EIO;
 }
 
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
+static int io_err_map_rq(struct dm_target *ti, struct request *rq,
+			struct request **clone,  union map_info *map_context)
 {
 	return -EIO;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32b958d..4e49bd2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -77,7 +77,7 @@ struct dm_io {
 struct dm_rq_target_io {
 	struct mapped_device *md;
 	struct dm_target *ti;
-	struct request *orig, clone;
+	struct request *orig, *clone;
 	int error;
 	union map_info info;
 };
@@ -889,11 +889,13 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *rq)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	blk_rq_unprep_clone(clone);
+	blk_put_request(clone);
 	free_rq_tio(tio);
 }
 
@@ -921,14 +923,15 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(rq);
 	blk_end_request_all(rq, error);
 	rq_completed(md, rw, true);
 }
 
 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
@@ -1038,13 +1041,12 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->error = error;
-	rq->completion_data = clone;
+	rq->completion_data = tio->clone;
 	blk_complete_request(rq);
 }
 
@@ -1054,13 +1056,10 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-void dm_kill_unmapped_request(struct request *clone, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
 
@@ -1069,13 +1068,8 @@ EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
  */
 static void end_clone_request(struct request *clone, int error)
 {
-	/*
-	 * For just cleaning up the information of the queue in which
-	 * the clone was dispatched.
-	 * The clone is *NOT* freed actually here because it is alloced from
-	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
-	 */
-	__blk_put_request(clone->q, clone);
+	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct request *rq = tio->orig;
 
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
@@ -1085,7 +1079,7 @@ static void end_clone_request(struct request *clone, int error)
 	 *     - the submission which requires queue lock may be done
 	 *       against this queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 
 /*
@@ -1619,10 +1613,9 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+			struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;
 
 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1635,14 +1628,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	tio->error = 0;
 	memset(&tio->info, 0, sizeof(tio->info));
 
-	clone = &tio->clone;
-	if (setup_clone(clone, rq, tio)) {
-		/* -ENOMEM */
-		free_rq_tio(tio);
-		return NULL;
-	}
-
-	return clone;
+	return tio;
 }
 
 /*
@@ -1651,18 +1637,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;
 
 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}
 
-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;
 
-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -1673,14 +1659,24 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  * 0  : the request has been processed (not requeued)
  * !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
-		       struct mapped_device *md)
+static int map_request(struct dm_target *ti, struct mapped_device *md,
+						struct request *rq)
 {
+	struct request *clone = NULL;
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->ti = ti;
-	r = ti->type->map_rq(ti, clone, &tio->info);
+	r = ti->type->map_rq(ti, rq, &clone, &tio->info);
+	if (!clone)
+		return -ENOMEM;
+	if (setup_clone(clone, rq, tio)) {
+		blk_put_request(clone);
+		return -ENOMEM;
+	}
+	tio->clone = clone;
+	atomic_inc(&md->pending[rq_data_dir(clone)]);
+
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
 		/* The target has taken the I/O to submit by itself later */
@@ -1710,13 +1706,9 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1726,8 +1718,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }
 
 /*
@@ -1740,7 +1730,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	sector_t pos;
 
 	/*
@@ -1766,18 +1756,18 @@ static void dm_request_fn(struct request_queue *q)
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);
 
 		spin_unlock(q->queue_lock);
-		if (map_request(ti, clone, md))
+		if (map_request(ti, md, rq))
 			goto requeued;
 
 		BUG_ON(!irqs_disabled());
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e1707de..d9ac281 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -46,8 +46,8 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
  * = 2: The target wants to push back the io
  */
 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
-typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
-				  union map_info *map_context);
+typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *rq,
+			struct request **clone, union map_info *map_context);
 
 /*
  * Returns:
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] dm-mpath: Work with blk multi-queue drivers
  2014-09-23 17:03 [PATCH] dm-mpath: Work with blk multi-queue drivers Keith Busch
@ 2014-09-24  9:02 ` Hannes Reinecke
  2014-09-24 14:27   ` Christoph Hellwig
  2014-09-24 14:52 ` Christoph Hellwig
  1 sibling, 1 reply; 14+ messages in thread
From: Hannes Reinecke @ 2014-09-24  9:02 UTC (permalink / raw)
  To: dm-devel

On 09/23/2014 07:03 PM, Keith Busch wrote:
> I'm working with multipathing nvme devices using the blk-mq version of
> the nvme driver, but dm-mpath only works with the older request based
> drivers. This patch proposes to enable dm-mpath to work with both types
> of request queues and is successful with my dual-ported nvme drives.
> 
> I think there may still be fix ups to do around submission side error
> handling, but I think it's at a decent stopping point to solicit feedback
> before I pursue taking it further. I hear there may be some resistance
> to add blk-mq support to dm-mpath anyway, but it seems too easy to add
> support to not at least try. :)
> 
> To work, this has dm allocate requests from the request_queue for
> the device-mapper type rather than allocate one on its own, so the
> cloned request is properly allocated and initialized for the device's
> request_queue. The original request's 'special' now points to the
> dm_rq_target_io rather than at the cloned request because the clone
> is allocated later by the block layer rather than by dm, and then all
> the other back referencing to the original seems to work out. The block
> layer then inserts the cloned request using the appropriate function for
> the request_queue type rather than just calling q->request_fn().
> 
> Compile tested on 3.17-rc6; runtime tested on Matias Bjorling's
> linux-collab nvmemq_review using 3.16.
> 
The resistance wasn't so much for enabling multipath for block-mq,
it was _how_ multipath should be modelled on top of block-mq.

With a simple enabling we actually have two layers of I/O
scheduling; once in multipathing to select between the individual
queues, and once in block-mq to select the correct hardware context.
So we end up with a four-tiered hierarchy:

m priority groups - n pg_paths/request_queues -> o cpus -> p hctx

Giving us a full m * n * p (hctx are tagged per cpu) variety where
the I/Os might be sent.

Performance wise it might be beneficial to tag a hardware context
to a given path, effectively removing I/O scheduling from
block-mq. But this would require some substantial update to the
current blk-mq design (blocked paths, dynamic reconfiguration).

However, this looks like a good starting point.
I'll give it a go and see how far I'll be getting with it.

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		      zSeries & Storage
hare@suse.de			      +49 911 74053 688
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: J. Hawn, J. Guild, F. Imendörffer, HRB 16746 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] dm-mpath: Work with blk multi-queue drivers
  2014-09-24  9:02 ` Hannes Reinecke
@ 2014-09-24 14:27   ` Christoph Hellwig
  0 siblings, 0 replies; 14+ messages in thread
From: Christoph Hellwig @ 2014-09-24 14:27 UTC (permalink / raw)
  To: Hannes Reinecke, Keith Busch, device-mapper development

On Wed, Sep 24, 2014 at 11:02:30AM +0200, Hannes Reinecke wrote:
> The resistance wasn't so much for enabling multipath for block-mq,
> it was _how_ multipath should be modelled on top of block-mq.

As well as finding someone to do the work..  We now found Keith, so
thanks on that already!
> 
> With a simple enabling we actually have two layers of I/O
> scheduling; once in multipathing to select between the individual
> queues, and once in block-mq to select the correct hardware context.
> So we end up with a four-tiered hierarchy:

Assuming we have multiple queues in the low level driver..

> However, this looks like a good starting point.
> I'll give it a go and see how far I'll be getting with it.

Yes, the first priority should be to make dm-mpath to work on a blk-mq
device at all, which is very important for scsi-mq adoption, which
will include a lot of single queue devices.  After that come various
levels of efficiency optimizations.  The first priority should be
to get rid of the horrible bio clones by hooking into the right place
in the I/O completion, second one would be to revamp the I/O submission
path, for example by hooking into ->map_queue.  With fairly limited work
that does sound doable as long as all paths use the same HBA/driver
which is not an unusual limitation in other multipathing
implementations.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] dm-mpath: Work with blk multi-queue drivers
  2014-09-23 17:03 [PATCH] dm-mpath: Work with blk multi-queue drivers Keith Busch
  2014-09-24  9:02 ` Hannes Reinecke
@ 2014-09-24 14:52 ` Christoph Hellwig
  2014-09-24 17:20   ` Keith Busch
  1 sibling, 1 reply; 14+ messages in thread
From: Christoph Hellwig @ 2014-09-24 14:52 UTC (permalink / raw)
  To: Keith Busch; +Cc: dm-devel, Mike Snitzer

> index bf930f4..4c5952b 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c

Can you split the block core changes into separate patches in a
series?

> @@ -2031,6 +2031,11 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
>  	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
>  		return -EIO;
>  
> +	if (q->mq_ops) {
> +		blk_mq_insert_request(rq, false, true, true);
> +		return 0;
> +	}

In the case of a torn down queue this one will complete the request,
while the !mq case will return an error.  Does the upper level code
handle that difference fine?

>  	spin_lock_irqsave(q->queue_lock, flags);
>  	if (unlikely(blk_queue_dying(q))) {
>  		spin_unlock_irqrestore(q->queue_lock, flags);
> @@ -2928,8 +2933,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
>  	if (!bs)
>  		bs = fs_bio_set;
>  
> -	blk_rq_init(NULL, rq);
> -

Moving this into the caller in a preparatory patch would be useful, too.

> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 383ea0c..eb53d1c 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -172,6 +172,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
>  #if defined(CONFIG_BLK_DEV_INTEGRITY)
>  	rq->nr_integrity_segments = 0;
>  #endif
> +	rq->bio = NULL;

Jens has been trying to micro-optimize this area and avoid additional
overhead for the fast path.  It would be better to do this in dm-mpath.

>  	unsigned long flags;
>  	struct pgpath *pgpath;
>  	struct block_device *bdev;
> @@ -410,9 +411,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
>  		goto out_unlock;
>  
>  	bdev = pgpath->path.dev->bdev;
> -	clone->q = bdev_get_queue(bdev);
> -	clone->rq_disk = bdev->bd_disk;
> -	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
> +	*clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq),
> +							GFP_KERNEL);

I suspect this should be GFP_NOWAIT so that we can push the request back
up the stack if none are available.

> +	if (!(*clone))
> +		goto out_unlock;

No need for the inner braces.

>   * This may be used when the target's map_rq() function fails.
>   */
> -void dm_kill_unmapped_request(struct request *clone, int error)
> +void dm_kill_unmapped_request(struct request *rq, int error)
>  {
> -	struct dm_rq_target_io *tio = clone->end_io_data;
> -	struct request *rq = tio->orig;
> -
>  	rq->cmd_flags |= REQ_FAILED;
> -	dm_complete_request(clone, error);
> +	dm_complete_request(rq, error);
>  }
>  EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

At this point it might be worth to just kill the
dm_kill_unmapped_request wrapper?

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] dm-mpath: Work with blk multi-queue drivers
  2014-09-24 14:52 ` Christoph Hellwig
@ 2014-09-24 17:20   ` Keith Busch
  2014-09-24 18:34     ` Mike Snitzer
  0 siblings, 1 reply; 14+ messages in thread
From: Keith Busch @ 2014-09-24 17:20 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Keith Busch, dm-devel, Mike Snitzer

On Wed, 24 Sep 2014, Christoph Hellwig wrote:
>> index bf930f4..4c5952b 100644
>> --- a/block/blk-core.c
>> +++ b/block/blk-core.c
>
> Can you split the block core changes into separate patches in a
> series?

Sounds good, will do. I'll have to follow your later suggestion to move
the blk_rq_init() to the caller early in the series so no part of the
patch set breaks anything.

>> @@ -2031,6 +2031,11 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
>>  	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
>>  		return -EIO;
>>
>> +	if (q->mq_ops) {
>> +		blk_mq_insert_request(rq, false, true, true);
>> +		return 0;
>> +	}
>
> In the case of a torn down queue this one will complete the request,
> while the !mq case will return an error.  Does the upper level code
> handle that difference fine?

I wasn't entirely sure about all the error handling. After re-examining,
I think there is a problem, but maybe a little different than what
you're suggesting: if the queue is torn down, blk_get_request() for the
clone returns NULL and the original request will be orphaned, so that's
no good. Need to somehow distinguish between a torn down queue vs. no
request available and requeue/fail as appropriate.

Both mq and !mq appear to work correctly if the queue happens to get
taken down after the request is obtained but before it is submitted,
but I'll synthesize this error to be sure.

>>  	unsigned long flags;
>>  	struct pgpath *pgpath;
>>  	struct block_device *bdev;
>> @@ -410,9 +411,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
>>  		goto out_unlock;
>>
>>  	bdev = pgpath->path.dev->bdev;
>> -	clone->q = bdev_get_queue(bdev);
>> -	clone->rq_disk = bdev->bd_disk;
>> -	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
>> +	*clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq),
>> +							GFP_KERNEL);
>
> I suspect this should be GFP_NOWAIT so that we can push the request back
> up the stack if none are available.

Aha, nice catch. I'd have hit the BUG_ON() in dm_request_fn() the way
I wrote this since irq's could have been re-enabled if a request wasn't
immediately available.

Thanks for the feedback!

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-24 17:20   ` Keith Busch
@ 2014-09-24 18:34     ` Mike Snitzer
  2014-09-24 18:48       ` Mike Snitzer
  0 siblings, 1 reply; 14+ messages in thread
From: Mike Snitzer @ 2014-09-24 18:34 UTC (permalink / raw)
  To: Keith Busch; +Cc: Christoph Hellwig, Jun'ichi Nomura, dm-devel

On Tue, Sep 23 2014 at  1:03pm -0400,
Keith Busch <keith.busch@intel.com> wrote:

> I hear there may be some resistance to add blk-mq support to dm-mpath
> anyway, but it seems too easy to add support to not at least try. :)

Why?  Not sure who you heard that from but I'm not opposed to it if it
is done properly.


On Wed, Sep 24 2014 at  1:20pm -0400,
Keith Busch <keith.busch@intel.com> wrote:

> On Wed, 24 Sep 2014, Christoph Hellwig wrote:
> >>index bf930f4..4c5952b 100644
> >>--- a/block/blk-core.c
> >>+++ b/block/blk-core.c
> >
... 
> >> 	unsigned long flags;
> >> 	struct pgpath *pgpath;
> >> 	struct block_device *bdev;
> >>@@ -410,9 +411,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
> >> 		goto out_unlock;
> >>
> >> 	bdev = pgpath->path.dev->bdev;
> >>-	clone->q = bdev_get_queue(bdev);
> >>-	clone->rq_disk = bdev->bd_disk;
> >>-	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
> >>+	*clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq),
> >>+							GFP_KERNEL);
> >
> >I suspect this should be GFP_NOWAIT so that we can push the request back
> >up the stack if none are available.
> 
> Aha, nice catch. I'd have hit the BUG_ON() in dm_request_fn() the way
> I wrote this since irq's could have been re-enabled if a request wasn't
> immediately available.
> 
> Thanks for the feedback!

We have never allowed new memory allocation in a DM target's .map or
.map_rq method.  The only allocations that are allowed are those backed
by a mempool (see md->io_pool usage in alloc_rq_tio, before the clone's
struct request was embedded in struct dm_rq_target_io).  This was done
to allow request-based DM devices to be stacked on each other.  Even
though nothing ever made use of stacking request-based DM devices; doing
so is still a DM design possibility.  Could be we can simply say "kill
request-based DM device stacking, it isn't needed".. if we went that far
then the per-rq-based-device mempool becomes much less important.

But building on this concern, historically each request-based DM device
has maintained a reserve for cloning a request (so that forward progress
can be ensured even if system memory is unavailable).  So AFAICT
Christoph's suggestion to switch to GFP_NOWAIT from GFP_KERNEL still
doesn't do enough to preserve the higher standards rq-based DM had
established for guaranteed ability to clone a request.

Taking a step back, I'm really _not_ liking the duality of the DM core
with regard to the cloning of a request.  Where a request-based DM
target (e.g. mpath) is tasked with calling blk_get_request() but then DM
core is left to manage the life-cycle of that clone that the target
allocated.  If anything a new dm_get_request() wrapper should be added
to DM core, subtle difference but then at least one can easily see
balanced request management within the DM core?

In general I'd really like to see a bit more care taken to improve the
block interface that DM is using for request-based DM.  What I mean is,
Hannes proposed a series that eliminated request-based DM's cloning of
requests (and associated bios), see thread:
http://www.redhat.com/archives/dm-devel/2014-June/msg00023.html

(that series was just a "test balloon" as Hannes put it, but it offered
some serious "cleanup" by nuking a lot of fiddly block layer code in DM
core)

I never did take the time to properly review Hannes' proposal but now
that you're floating this blk-mq support for DM core (and DM mpath) I'm
clearly going to have to take this all on in a much more focused way.

Christoph/Hannes/Junichi/Keith/others, can you see a way forward that
offers a lighter request-based DM that makes required callouts to (new?)
block interfaces that helps us abstract the old request and blk-mq
request allocation, etc?

Mike

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-24 18:34     ` Mike Snitzer
@ 2014-09-24 18:48       ` Mike Snitzer
  2014-09-25  0:13         ` Mike Snitzer
  0 siblings, 1 reply; 14+ messages in thread
From: Mike Snitzer @ 2014-09-24 18:48 UTC (permalink / raw)
  To: Keith Busch; +Cc: Christoph Hellwig, Jun'ichi Nomura, dm-devel

On Wed, Sep 24 2014 at  2:34pm -0400,
Mike Snitzer <snitzer@redhat.com> wrote:

> 
> I never did take the time to properly review Hannes' proposal but now
> that you're floating this blk-mq support for DM core (and DM mpath) I'm
> clearly going to have to take this all on in a much more focused way.
> 
> Christoph/Hannes/Junichi/Keith/others, can you see a way forward that
> offers a lighter request-based DM that makes required callouts to (new?)
> block interfaces that helps us abstract the old request and blk-mq
> request allocation, etc?

(sorry about replying to myself...)

So revisiting that thread from above, these posts stand out:
http://www.redhat.com/archives/dm-devel/2014-June/msg00026.html
http://www.redhat.com/archives/dm-devel/2014-June/msg00028.html

I'd love to see us get rid of request-based DM's bio cloning for each
cloned request (we never did get an answer from the NEC guys to know
_why_ that was done).

http://www.redhat.com/archives/dm-devel/2014-June/msg00029.html

But I now see what Christoph was saying about needing the call
blk_get_request() against the low level path... and that completely
avoiding request cloning like Hannes did is a non-starter for blk-mq.

So if we could:
1) rip out the rq-based DM's cloning of all bios in a request
2) rebase Keith's approach ontop of 1) then we could go from there
   - but happy to put more thought in upfront to avoid busy work; and
     I'd encourage everyone else to do the same...

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-24 18:48       ` Mike Snitzer
@ 2014-09-25  0:13         ` Mike Snitzer
  2014-09-25 15:57           ` Keith Busch
  0 siblings, 1 reply; 14+ messages in thread
From: Mike Snitzer @ 2014-09-25  0:13 UTC (permalink / raw)
  To: Keith Busch; +Cc: Christoph Hellwig, Jun'ichi Nomura, dm-devel

On Wed, Sep 24 2014 at  2:48pm -0400,
Mike Snitzer <snitzer@redhat.com> wrote:

> On Wed, Sep 24 2014 at  2:34pm -0400,
> Mike Snitzer <snitzer@redhat.com> wrote:
> 
> > 
> > I never did take the time to properly review Hannes' proposal but now
> > that you're floating this blk-mq support for DM core (and DM mpath) I'm
> > clearly going to have to take this all on in a much more focused way.
> > 
> > Christoph/Hannes/Junichi/Keith/others, can you see a way forward that
> > offers a lighter request-based DM that makes required callouts to (new?)
> > block interfaces that helps us abstract the old request and blk-mq
> > request allocation, etc?
> 
> (sorry about replying to myself...)
> 
> SO revisiting that thread from above, these posts stand out:
> http://www.redhat.com/archives/dm-devel/2014-June/msg00026.html
> http://www.redhat.com/archives/dm-devel/2014-June/msg00028.html
> 
> I'd love to see us get rid of request-based DM's bio cloning for each
> cloned request (we never did get an answer from the NEC guys to know
> _why_ that was done).

Actually, Junichi did respond with why:
http://www.redhat.com/archives/dm-devel/2014-June/msg00033.html

So this needs more review and thought.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-25  0:13         ` Mike Snitzer
@ 2014-09-25 15:57           ` Keith Busch
  2014-09-25 16:08             ` Christoph Hellwig
  2014-09-25 16:12             ` Mike Snitzer
  0 siblings, 2 replies; 14+ messages in thread
From: Keith Busch @ 2014-09-25 15:57 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: Keith Busch, Christoph Hellwig, Jun'ichi Nomura, dm-devel

On Wed, 24 Sep 2014, Mike Snitzer wrote:
> Mike Snitzer <snitzer@redhat.com> wrote:
>> On Wed, Sep 24 2014 at  2:34pm -0400,
>> Mike Snitzer <snitzer@redhat.com> wrote:
>>> I never did take the time to properly review Hannes' proposal but now
>>> that you're floating this blk-mq support for DM core (and DM mpath) I'm
>>> clearly going to have to take this all on in a much more focused way.
>>>
>>> Christoph/Hannes/Junichi/Keith/others, can you see a way forward that
>>> offers a lighter request-based DM that makes required callouts to (new?)
>>> block interfaces that helps us abstract the old request and blk-mq
>>> request allocation, etc?
>>
>> (sorry about replying to myself...)
>>
>> SO revisiting that thread from above, these posts stand out:
>> http://www.redhat.com/archives/dm-devel/2014-June/msg00026.html
>> http://www.redhat.com/archives/dm-devel/2014-June/msg00028.html
>>
>> I'd love to see us get rid of request-based DM's bio cloning for each
>> cloned request (we never did get an answer from the NEC guys to know
>> _why_ that was done).
>
> Actually, Junichi did respond with why:
> http://www.redhat.com/archives/dm-devel/2014-June/msg00033.html
>
> So this needs more review and thought.

Thank you for all the background information. This definitely gives me
a lot more to think about.

For my part, the goal was to change as little as possible to get basic
blk-mq support working safely without regressing, and performance is
not even on my radar yet. I purposefully did not try to understand the
existing design well enough to propose re-arching. If we can address the
'request' life cycle management duality issue, would this be acceptable
as a stopgap for blk-mq support?

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-25 15:57           ` Keith Busch
@ 2014-09-25 16:08             ` Christoph Hellwig
  2014-09-25 16:12             ` Mike Snitzer
  1 sibling, 0 replies; 14+ messages in thread
From: Christoph Hellwig @ 2014-09-25 16:08 UTC (permalink / raw)
  To: Keith Busch
  Cc: Christoph Hellwig, Jun'ichi Nomura, dm-devel, Mike Snitzer

On Thu, Sep 25, 2014 at 09:57:41AM -0600, Keith Busch wrote:
> Thank you for all the background information. This definitely gives me
> a lot more to think about.
> 
> For my part, the goal was to change as little as possible to get basic
> blk-mq support working safely without regressing, and performance is
> not even on my radar yet. I purposefully did not try to understand the
> existing design well enough to propose re-arching. If we can address the
> 'request' life cycle management duality issue, would this be acceptable
> as a stopgap for blk-mq support?

I fully agree with going for the stop gap for now.  I tried going the
long way when I gave it a try and got stuck.

If people believe the get_request in the map path is harmful for the old
code we might have to make your change conditional just for blk-mq.  For
blk-mq request allocation never dips into the general purpose memory
pool, so it should be fine for that case.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-25 15:57           ` Keith Busch
  2014-09-25 16:08             ` Christoph Hellwig
@ 2014-09-25 16:12             ` Mike Snitzer
  2014-09-29 23:58               ` Junichi Nomura
  1 sibling, 1 reply; 14+ messages in thread
From: Mike Snitzer @ 2014-09-25 16:12 UTC (permalink / raw)
  To: Keith Busch; +Cc: Christoph Hellwig, Jun'ichi Nomura, dm-devel

On Thu, Sep 25 2014 at 11:57am -0400,
Keith Busch <keith.busch@intel.com> wrote:

> On Wed, 24 Sep 2014, Mike Snitzer wrote:
> >Mike Snitzer <snitzer@redhat.com> wrote:
> >>On Wed, Sep 24 2014 at  2:34pm -0400,
> >>Mike Snitzer <snitzer@redhat.com> wrote:
> >>>I never did take the time to properly review Hannes' proposal but now
> >>>that you're floating this blk-mq support for DM core (and DM mpath) I'm
> >>>clearly going to have to take this all on in a much more focused way.
> >>>
> >>>Christoph/Hannes/Junichi/Keith/others, can you see a way forward that
> >>>offers a lighter request-based DM that makes required callouts to (new?)
> >>>block interfaces that helps us abstract the old request and blk-mq
> >>>request allocation, etc?
> >>
> >>(sorry about replying to myself...)
> >>
> >>SO revisiting that thread from above, these posts stand out:
> >>http://www.redhat.com/archives/dm-devel/2014-June/msg00026.html
> >>http://www.redhat.com/archives/dm-devel/2014-June/msg00028.html
> >>
> >>I'd love to see us get rid of request-based DM's bio cloning for each
> >>cloned request (we never did get an answer from the NEC guys to know
> >>_why_ that was done).
> >
> >Actually, Junichi did respond with why:
> >http://www.redhat.com/archives/dm-devel/2014-June/msg00033.html
> >
> >So this needs more review and thought.
> 
> Thank you for all the background information. This definitely gives me
> a lot more to think about.
> 
> For my part, the goal was to change as little as possible to get basic
> blk-mq support working safely without regressing, and performance is
> not even on my radar yet. I purposefully did not try to understand the
> existing design well enough to propose re-arching. If we can address the
> 'request' life cycle management duality issue, would this be acceptable
> as a stopgap for blk-mq support?

We can ignore my desire to cleanup existing request-based DM's bio
cloning for now.  And yes, resolving the duality issue would need to
happen.  But your proposed change still has the issue of no longer using
a dedicated mempool per rq-based DM device to allocate requests from.
If we were to do that I'm pretty sure this new dm.c:dm_get_request()
wrapper would need to call blk_get_request() with GFP_ATOMIC.

Either GFP_ATOMIC or I think we _could_ relax to GFP_NOWAIT if and only
if we were willing to explicitly disallow stacking request-based DM
devices (which nothing uses at this point).  So I'd like to get Junichi
and Alasdair's feedback on the implications.  Junichi and/or Alasdair?

Mike

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-25 16:12             ` Mike Snitzer
@ 2014-09-29 23:58               ` Junichi Nomura
  2014-09-30 14:18                 ` Mike Snitzer
  0 siblings, 1 reply; 14+ messages in thread
From: Junichi Nomura @ 2014-09-29 23:58 UTC (permalink / raw)
  To: Mike Snitzer, Keith Busch, Christoph Hellwig; +Cc: dm-devel

On 09/26/14 01:12, Mike Snitzer wrote:
> On Thu, Sep 25 2014 at 11:57am -0400,
> Keith Busch <keith.busch@intel.com> wrote:
>> For my part, the goal was to change as little as possible to get basic
>> blk-mq support working safely without regressing, and performance is
>> not even on my radar yet. I purposefully did not try to understand the
>> existing design well enough to propose re-arching. If we can address the
>> 'request' life cycle management duality issue, would this be acceptable
>> as a stopgap for blk-mq support?
> 
> We can ignore my desire to cleanup existing request-based DM's bio
> cloning for now.  And yes, resolving the duality issue would need to
> happen.  But your proposed change still has the issue of no longer using
> a dedicated mempool per rq-based DM device to allocate requests from.
> If we were to do that I'm pretty sure this new dm.c:dm_get_request()
> wrapper would need to call blk_get_request() with GFP_ATOMIC.
> 
> Either GFP_ATOMIC or I think we _could_ relax to GFP_NOWAIT if and only
> if we were willing to explicitly disallow stacking request-based DM
> devices (which nothing uses at this point).  So I'd like to get Junichi
> and Alasdair's feedback on the implications.  Junichi and/or Alasdair?

The problem with "stacking request-based DM devices" is
caused by shared mempool (i.e. the pool gets emptied by
upper layer and we can't make forward progress).
So it should be ok if request has per-device mempool
(I think it does.)

However, using blk_get_request() in map function will
require more changes in the code as blk_get_request()
assumes interrupt-enabled context.

-- 
Jun'ichi Nomura, NEC Corporation

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-29 23:58               ` Junichi Nomura
@ 2014-09-30 14:18                 ` Mike Snitzer
  2014-09-30 23:43                   ` Junichi Nomura
  0 siblings, 1 reply; 14+ messages in thread
From: Mike Snitzer @ 2014-09-30 14:18 UTC (permalink / raw)
  To: Junichi Nomura; +Cc: Keith Busch, Christoph Hellwig, dm-devel

On Mon, Sep 29 2014 at  7:58pm -0400,
Junichi Nomura <j-nomura@ce.jp.nec.com> wrote:

> On 09/26/14 01:12, Mike Snitzer wrote:
> > On Thu, Sep 25 2014 at 11:57am -0400,
> > Keith Busch <keith.busch@intel.com> wrote:
> >> For my part, the goal was to change as little as possible to get basic
> >> blk-mq support working safely without regressing, and performance is
> >> not even on my radar yet. I purposefully did not try to understand the
> >> existing design well enough to propose re-arching. If we can address the
> >> 'request' life cycle management duality issue, would this be acceptable
> >> as a stopgap for blk-mq support?
> > 
> > We can ignore my desire to cleanup existing request-based DM's bio
> > cloning for now.  And yes, resolving the duality issue would need to
> > happen.  But your proposed change still has the issue of no longer using
> > a dedicated mempool per rq-based DM device to allocate requests from.
> > If we were to do that I'm pretty sure this new dm.c:dm_get_request()
> > wrapper would need to call blk_get_request() with GFP_ATOMIC.
> > 
> > Either GFP_ATOMIC or I think we _could_ relax to GFP_NOWAIT if and only
> > if we were willing to explicitly disallow stacking request-based DM
> > devices (which nothing uses at this point).  So I'd like to get Junichi
> > and Alasdair's feedback on the implications.  Junichi and/or Alasdair?
> 
> The problem with "stacking request-based DM devices" is
> caused by shared mempool (i.e. the pool gets emptied by
> upper layer and we can't make forward progress).
> So it should be ok if request has per-device mempool
> (I think it does.)

Current request-based DM provides a per-device mempool that all cloned
requests are allocated from.  But Keith's approach to have map_rq call
blk_get_request will no longer make use of that DM provided mempool.

But are you referring to the request_queue's use of a mempool that is
initialized with blk_init_rl() in blk_init_allocated_queue()?

> However, using blk_get_request() in map function will
> require more changes in the code as blk_get_request()
> assumes interrupt-enabled context.

Ah yes, blk_get_request will unconditionally disable interrupts using
spin_lock_irq.  Not yet looked at the implications though.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: dm-mpath: Work with blk multi-queue drivers
  2014-09-30 14:18                 ` Mike Snitzer
@ 2014-09-30 23:43                   ` Junichi Nomura
  0 siblings, 0 replies; 14+ messages in thread
From: Junichi Nomura @ 2014-09-30 23:43 UTC (permalink / raw)
  To: Mike Snitzer, Keith Busch, Christoph Hellwig; +Cc: dm-devel

On 09/30/14 23:18, Mike Snitzer wrote:
> On Mon, Sep 29 2014 at  7:58pm -0400,
> Junichi Nomura <j-nomura@ce.jp.nec.com> wrote:
> 
>> On 09/26/14 01:12, Mike Snitzer wrote:
>>> On Thu, Sep 25 2014 at 11:57am -0400,
>>> Keith Busch <keith.busch@intel.com> wrote:
>>>> For my part, the goal was to change as little as possible to get basic
>>>> blk-mq support working safely without regressing, and performance is
>>>> not even on my radar yet. I purposefully did not try to understand the
>>>> existing design well enough to propose re-arching. If we can address the
>>>> 'request' life cycle management duality issue, would this be acceptable
>>>> as a stopgap for blk-mq support?
>>>
>>> We can ignore my desire to cleanup existing request-based DM's bio
>>> cloning for now.  And yes, resolving the duality issue would need to
>>> happen.  But your proposed change still has the issue of no longer using
>>> a dedicated mempool per rq-based DM device to allocate requests from.
>>> If we were to do that I'm pretty sure this new dm.c:dm_get_request()
>>> wrapper would need to call blk_get_request() with GFP_ATOMIC.
>>>
>>> Either GFP_ATOMIC or I think we _could_ relax to GFP_NOWAIT if and only
>>> if we were willing to explicitly disallow stacking request-based DM
>>> devices (which nothing uses at this point).  So I'd like to get Junichi
>>> and Alasdair's feedback on the implications.  Junichi and/or Alasdair?
>>
>> The problem with "stacking request-based DM devices" is
>> caused by shared mempool (i.e. the pool gets emptied by
>> upper layer and we can't make forward progress).
>> So it should be ok if request has per-device mempool
>> (I think it does.)
> 
> Current request-based DM provides a per-device mempool that all cloned
> requests are allocated from.  But Keith's approach to have map_rq call
> blk_get_request will no longer make use of that DM provided mempool.
> 
> But are you referring to the request_queue's use of a mempool that is
> initialized with blk_init_rl() in blk_init_allocated_queue()?

Yes.

>> However, using blk_get_request() in map function will
>> require more changes in the code as blk_get_request()
>> assumes interrupt-enabled context.
> 
> Ah yes, blk_get_request will unconditionally disable interrupts using
> spin_lock_irq.  Not yet looked at the implications though.

Actually, early implementation of request-based DM had tried to
use blk_get_request() by converting them to irqsave/irqrestore
variants. However, since (old, non-mq version of) blk_get_request
is designed to be called in process context, such a change
could have confused the interface.
As a result, current DM code implements pre-allocation of memory
and mapping separately.
I think blk-mq already does pre-allocation of requests internally
and mq version of blk_get_request is actually a mapping function
in this case.

So, I suspect DM functions for pre-allocation (clone_rq) and mapping
(map_request) are good place for containing the duality inside.

-- 
Jun'ichi Nomura, NEC Corporation

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2014-09-30 23:43 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-09-23 17:03 [PATCH] dm-mpath: Work with blk multi-queue drivers Keith Busch
2014-09-24  9:02 ` Hannes Reinecke
2014-09-24 14:27   ` Christoph Hellwig
2014-09-24 14:52 ` Christoph Hellwig
2014-09-24 17:20   ` Keith Busch
2014-09-24 18:34     ` Mike Snitzer
2014-09-24 18:48       ` Mike Snitzer
2014-09-25  0:13         ` Mike Snitzer
2014-09-25 15:57           ` Keith Busch
2014-09-25 16:08             ` Christoph Hellwig
2014-09-25 16:12             ` Mike Snitzer
2014-09-29 23:58               ` Junichi Nomura
2014-09-30 14:18                 ` Mike Snitzer
2014-09-30 23:43                   ` Junichi Nomura

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.