linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* blk-mq: refactor request allocation
@ 2014-05-27 18:59 Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 1/5] blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Christoph Hellwig
                   ` (5 more replies)
  0 siblings, 6 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

This series streamlines the request allocation path.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 1/5] blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
@ 2014-05-27 18:59 ` Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 2/5] blk-mq: initialize request in __blk_mq_alloc_request Christoph Hellwig
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

Instead of having two almost identical copies of the same code just let
the callers pass in the reserved flag directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c                  |    2 +-
 block/blk-mq.c                    |   20 +++-----------------
 drivers/block/mtip32xx/mtip32xx.c |    2 +-
 include/linux/blk-mq.h            |    4 ++--
 4 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 29d5fba..d87be5b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1173,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask);
+		return blk_mq_alloc_request(q, rw, gfp_mask, false);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 30bad93..f8f84e2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -294,35 +294,21 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
+		bool reserved)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
-					      gfp_t gfp)
-{
-	struct request *rq;
-
-	if (blk_mq_queue_enter(q))
-		return NULL;
-
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
-	if (rq)
-		blk_mq_put_ctx(rq->mq_ctx);
-	return rq;
-}
-EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
-
 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 				  struct blk_mq_ctx *ctx, struct request *rq)
 {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index ae331ab..ea323e9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -178,7 +178,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
 	struct request *rq;
 
-	rq = blk_mq_alloc_reserved_request(dd->queue, 0, __GFP_WAIT);
+	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
 	return blk_mq_rq_to_pdu(rq);
 }
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index afeb934..2aefc3c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -160,8 +160,8 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 2/5] blk-mq: initialize request in __blk_mq_alloc_request
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 1/5] blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Christoph Hellwig
@ 2014-05-27 18:59 ` Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 3/5] blk-mq: remove blk_mq_wait_for_tags Christoph Hellwig
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

Both callers of __blk_mq_alloc_request want to initialize the request, so
lift it into the common path.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c |   62 +++++++++++++++++++++++++++-----------------------------
 1 file changed, 30 insertions(+), 32 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f8f84e2..848b3b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -92,30 +92,6 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      struct blk_mq_ctx *ctx,
-					      gfp_t gfp, bool reserved)
-{
-	struct request *rq;
-	unsigned int tag;
-
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
-
-		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
-			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
-		}
-
-		rq->tag = tag;
-		return rq;
-	}
-
-	return NULL;
-}
-
 static int blk_mq_queue_enter(struct request_queue *q)
 {
 	int ret;
@@ -263,6 +239,32 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
+static struct request *
+__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+{
+	struct request *rq;
+	unsigned int tag;
+
+	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	if (tag != BLK_MQ_TAG_FAIL) {
+		rq = hctx->tags->rqs[tag];
+
+		rq->cmd_flags = 0;
+		if (blk_mq_tag_busy(hctx)) {
+			rq->cmd_flags = REQ_MQ_INFLIGHT;
+			atomic_inc(&hctx->nr_active);
+		}
+
+		rq->tag = tag;
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		return rq;
+	}
+
+	return NULL;
+}
+
+
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -273,12 +275,10 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
 						reserved);
-		if (rq) {
-			blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		if (rq)
 			break;
-		}
 
 		if (gfp & __GFP_WAIT) {
 			__blk_mq_run_hw_queue(hctx);
@@ -1114,10 +1114,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
-	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
-	else {
+	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	if (unlikely(!rq)) {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
 		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 3/5] blk-mq: remove blk_mq_wait_for_tags
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 1/5] blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 2/5] blk-mq: initialize request in __blk_mq_alloc_request Christoph Hellwig
@ 2014-05-27 18:59 ` Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 4/5] blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request Christoph Hellwig
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

The current logic for blocking tag allocation is rather confusing, as we
first allocate and then free again a tag in blk_mq_wait_for_tags, just
to attempt a non-blocking allocation and then repeat if someone else
managed to grab the tag before us.

Instead change blk_mq_alloc_request_pinned to simply do a blocking tag
allocation itself and use the request we get back from it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-tag.c |    8 --------
 block/blk-mq-tag.h |    1 -
 block/blk-mq.c     |   13 ++++++-------
 3 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 05e2baf..0d0640d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -7,14 +7,6 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
-{
-	int tag, zero = 0;
-
-	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
-	blk_mq_put_tag(hctx, tag, &zero);
-}
-
 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
 {
 	int i;
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 2e5e687..c959de5 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -49,7 +49,6 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
-extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved);
 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 848b3b6..44ee79c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -264,31 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	return NULL;
 }
 
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
 {
+	bool gfp_mask = gfp & ~__GFP_WAIT;
 	struct request *rq;
 
 	do {
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask,
 						reserved);
 		if (rq)
 			break;
 
-		if (gfp & __GFP_WAIT) {
-			__blk_mq_run_hw_queue(hctx);
-			blk_mq_put_ctx(ctx);
-		} else {
+		if (!(gfp & __GFP_WAIT)) {
 			blk_mq_put_ctx(ctx);
 			break;
 		}
 
-		blk_mq_wait_for_tags(hctx, reserved);
+		__blk_mq_run_hw_queue(hctx);
+		blk_mq_put_ctx(ctx);
+		gfp_mask = gfp;
 	} while (1);
 
 	return rq;
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 4/5] blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
                   ` (2 preceding siblings ...)
  2014-05-27 18:59 ` [PATCH 3/5] blk-mq: remove blk_mq_wait_for_tags Christoph Hellwig
@ 2014-05-27 18:59 ` Christoph Hellwig
  2014-05-27 18:59 ` [PATCH 5/5] blk-mq: remove blk_mq_alloc_request_pinned Christoph Hellwig
  2014-05-27 20:58 ` blk-mq: refactor request allocation Jens Axboe
  5 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

We already do a non-blocking allocation in blk_mq_map_request, no need
to repeat it.  Just call __blk_mq_alloc_request to wait directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 44ee79c..7489d19 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1115,12 +1115,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
 	if (unlikely(!rq)) {
+		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
-		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
-							false);
-		ctx = rq->mq_ctx;
+
+		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
+					    __GFP_WAIT|GFP_ATOMIC, false);
 	}
 
 	hctx->queued++;
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 5/5] blk-mq: remove blk_mq_alloc_request_pinned
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
                   ` (3 preceding siblings ...)
  2014-05-27 18:59 ` [PATCH 4/5] blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request Christoph Hellwig
@ 2014-05-27 18:59 ` Christoph Hellwig
  2014-05-27 20:58 ` blk-mq: refactor request allocation Jens Axboe
  5 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-27 18:59 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

We now only have one caller left and can open code it there in a cleaner
way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c |   48 ++++++++++++++++--------------------------------
 1 file changed, 16 insertions(+), 32 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7489d19..9d27077 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -264,46 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	return NULL;
 }
 
-static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
-						   int rw, gfp_t gfp,
-						   bool reserved)
-{
-	bool gfp_mask = gfp & ~__GFP_WAIT;
-	struct request *rq;
-
-	do {
-		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask,
-						reserved);
-		if (rq)
-			break;
-
-		if (!(gfp & __GFP_WAIT)) {
-			blk_mq_put_ctx(ctx);
-			break;
-		}
-
-		__blk_mq_run_hw_queue(hctx);
-		blk_mq_put_ctx(ctx);
-		gfp_mask = gfp;
-	} while (1);
-
-	return rq;
-}
-
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 		bool reserved)
 {
+	struct blk_mq_ctx *ctx;
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
-	if (rq)
-		blk_mq_put_ctx(rq->mq_ctx);
+	ctx = blk_mq_get_ctx(q);
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
+				    reserved);
+	if (!rq && (gfp & __GFP_WAIT)) {
+		__blk_mq_run_hw_queue(hctx);
+		blk_mq_put_ctx(ctx);
+
+		ctx = blk_mq_get_ctx(q);
+		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+		rq =  __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+	}
+	blk_mq_put_ctx(ctx);
 	return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: blk-mq: refactor request allocation
  2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
                   ` (4 preceding siblings ...)
  2014-05-27 18:59 ` [PATCH 5/5] blk-mq: remove blk_mq_alloc_request_pinned Christoph Hellwig
@ 2014-05-27 20:58 ` Jens Axboe
  2014-05-28  5:19   ` Christoph Hellwig
  5 siblings, 1 reply; 10+ messages in thread
From: Jens Axboe @ 2014-05-27 20:58 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel

On 05/27/2014 12:59 PM, Christoph Hellwig wrote:
> This series streamlines the request allocation path.
> 

Series looks innocuous enough to me, but it's about a 1.5% performance
drop here with an actual device. These tests are very stable, anything
over ~0.1% is definitely outside of noise. I repeated and rebooted a few
times and tested both, it's persistent. No smoking guns in the profile.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: blk-mq: refactor request allocation
  2014-05-27 20:58 ` blk-mq: refactor request allocation Jens Axboe
@ 2014-05-28  5:19   ` Christoph Hellwig
  2014-05-28 14:20     ` Jens Axboe
  2014-05-28 15:47     ` Jens Axboe
  0 siblings, 2 replies; 10+ messages in thread
From: Christoph Hellwig @ 2014-05-28  5:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel

On Tue, May 27, 2014 at 02:58:08PM -0600, Jens Axboe wrote:
> On 05/27/2014 12:59 PM, Christoph Hellwig wrote:
> > This series streamlines the request allocation path.
> > 
> 
> Series looks innocuous enough to me, but it's about a 1.5% performance
> drop here with an actual device. These tests are very stable, anything
> over ~0.1% is definitely outside of noise. I repeated and rebooted a few
> times and tested both, it's persistent. No smoking guns in the profile.

Can you do a bisect to narrow it down to one of the patches?

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: blk-mq: refactor request allocation
  2014-05-28  5:19   ` Christoph Hellwig
@ 2014-05-28 14:20     ` Jens Axboe
  2014-05-28 15:47     ` Jens Axboe
  1 sibling, 0 replies; 10+ messages in thread
From: Jens Axboe @ 2014-05-28 14:20 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel

On 2014-05-27 23:19, Christoph Hellwig wrote:
> On Tue, May 27, 2014 at 02:58:08PM -0600, Jens Axboe wrote:
>> On 05/27/2014 12:59 PM, Christoph Hellwig wrote:
>>> This series streamlines the request allocation path.
>>>
>>
>> Series looks innocuous enough to me, but it's about a 1.5% performance
>> drop here with an actual device. These tests are very stable, anything
>> over ~0.1% is definitely outside of noise. I repeated and rebooted a few
>> times and tested both, it's persistent. No smoking guns in the profile.
>
> Can you do a bisect to narrow it down to one of the patches?

Sure, I'll give it a whirl.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: blk-mq: refactor request allocation
  2014-05-28  5:19   ` Christoph Hellwig
  2014-05-28 14:20     ` Jens Axboe
@ 2014-05-28 15:47     ` Jens Axboe
  1 sibling, 0 replies; 10+ messages in thread
From: Jens Axboe @ 2014-05-28 15:47 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel

On 2014-05-27 23:19, Christoph Hellwig wrote:
> On Tue, May 27, 2014 at 02:58:08PM -0600, Jens Axboe wrote:
>> On 05/27/2014 12:59 PM, Christoph Hellwig wrote:
>>> This series streamlines the request allocation path.
>>>
>>
>> Series looks innocuous enough to me, but it's about a 1.5% performance
>> drop here with an actual device. These tests are very stable, anything
>> over ~0.1% is definitely outside of noise. I repeated and rebooted a few
>> times and tested both, it's persistent. No smoking guns in the profile.
>
> Can you do a bisect to narrow it down to one of the patches?

Did bisect and even one-by-one, and now it doesn't show up of course. 
The nvme branch was rebased on top of a new base since yesterday, so it 
could just be layout weirdness from the previous one.

So I think we can call it fine, I'll apply it.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2014-05-28 15:47 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-05-27 18:59 blk-mq: refactor request allocation Christoph Hellwig
2014-05-27 18:59 ` [PATCH 1/5] blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Christoph Hellwig
2014-05-27 18:59 ` [PATCH 2/5] blk-mq: initialize request in __blk_mq_alloc_request Christoph Hellwig
2014-05-27 18:59 ` [PATCH 3/5] blk-mq: remove blk_mq_wait_for_tags Christoph Hellwig
2014-05-27 18:59 ` [PATCH 4/5] blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request Christoph Hellwig
2014-05-27 18:59 ` [PATCH 5/5] blk-mq: remove blk_mq_alloc_request_pinned Christoph Hellwig
2014-05-27 20:58 ` blk-mq: refactor request allocation Jens Axboe
2014-05-28  5:19   ` Christoph Hellwig
2014-05-28 14:20     ` Jens Axboe
2014-05-28 15:47     ` Jens Axboe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).