* [PATCHSET 0/4] Various block optimizations
@ 2021-10-13 16:49 Jens Axboe
  2021-10-13 16:49 ` [PATCH 1/4] block: provide helpers for rq_list manipulation Jens Axboe
                   ` (3 more replies)
  0 siblings, 4 replies; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 16:49 UTC (permalink / raw)
  To: linux-block

Hi,

The patches in this series aren't really connected; they are all
standalone optimizations that make a difference.

-- 
Jens Axboe




* [PATCH 1/4] block: provide helpers for rq_list manipulation
  2021-10-13 16:49 [PATCHSET 0/4] Various block optimizations Jens Axboe
@ 2021-10-13 16:49 ` Jens Axboe
  2021-10-13 17:11   ` Christoph Hellwig
  2021-10-13 16:49 ` [PATCH 2/4] block: inline fast path of driver tag allocation Jens Axboe
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 16:49 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe, Christoph Hellwig

Instead of open-coding the list additions, traversal, and removal,
provide a basic set of helpers.
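
For illustration, the new helpers compose like this (a sketch; the example
function and the two requests are hypothetical, not part of the patch):

	static void rq_list_example(struct request *rq1, struct request *rq2)
	{
		struct request *rq_list = NULL;	/* empty list */
		struct request *rq;

		/* link the requests into the list */
		rq_list_add_tail(&rq_list, rq1);
		rq_list_add_tail(&rq_list, rq2);

		/* walk the list without consuming it */
		for (rq = rq_list_peek(&rq_list); rq; rq = rq_list_next(rq))
			pr_info("rq tag %d\n", rq->tag);

		/* pop requests until the list is empty */
		while ((rq = rq_list_pop(&rq_list)) != NULL)
			blk_mq_free_request(rq);
	}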

Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c         | 21 +++++----------------
 include/linux/blk-mq.h | 25 +++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6dfd3aaa6073..46a91e5fabc5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -426,10 +426,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 			tag = tag_offset + i;
 			tags &= ~(1UL << i);
 			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-			rq->rq_next = *data->cached_rq;
-			*data->cached_rq = rq;
+			rq_list_add_tail(data->cached_rq, rq);
 		}
 		data->nr_tags -= nr;
+		return rq_list_pop(data->cached_rq);
 	} else {
 		/*
 		 * Waiting allocations only fail because of an inactive hctx.
@@ -453,14 +453,6 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 
 		return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
 	}
-
-	if (data->cached_rq) {
-		rq = *data->cached_rq;
-		*data->cached_rq = rq->rq_next;
-		return rq;
-	}
-
-	return NULL;
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
@@ -603,11 +595,9 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
-	while (plug->cached_rq) {
-		struct request *rq;
+	struct request *rq;
 
-		rq = plug->cached_rq;
-		plug->cached_rq = rq->rq_next;
+	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
 		percpu_ref_get(&rq->q->q_usage_counter);
 		blk_mq_free_request(rq);
 	}
@@ -2264,8 +2254,7 @@ void blk_mq_submit_bio(struct bio *bio)
 
 	plug = blk_mq_plug(q, bio);
 	if (plug && plug->cached_rq) {
-		rq = plug->cached_rq;
-		plug->cached_rq = rq->rq_next;
+		rq = rq_list_pop(&plug->cached_rq);
 		INIT_LIST_HEAD(&rq->queuelist);
 	} else {
 		struct blk_mq_alloc_data data = {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a9c1d0882550..c05560524841 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -473,6 +473,31 @@ struct blk_mq_tag_set {
 	struct list_head	tag_list;
 };
 
+#define rq_list_add_tail(listptr, rq)	do {		\
+	(rq)->rq_next = *(listptr);			\
+	*(listptr) = rq;				\
+} while (0)
+
+#define rq_list_pop(listptr)				\
+({							\
+	struct request *__req = NULL;			\
+	if ((listptr) && *(listptr))	{		\
+		__req = *(listptr);			\
+		*(listptr) = __req->rq_next;		\
+	}						\
+	__req;						\
+})
+
+#define rq_list_peek(listptr)				\
+({							\
+	struct request *__req = NULL;			\
+	if ((listptr) && *(listptr))			\
+		__req = *(listptr);			\
+	__req;						\
+})
+
+#define rq_list_next(rq)	(rq)->rq_next
+
 /**
  * struct blk_mq_queue_data - Data about a request inserted in a queue
  *
-- 
2.33.0



* [PATCH 2/4] block: inline fast path of driver tag allocation
  2021-10-13 16:49 [PATCHSET 0/4] Various block optimizations Jens Axboe
  2021-10-13 16:49 ` [PATCH 1/4] block: provide helpers for rq_list manipulation Jens Axboe
@ 2021-10-13 16:49 ` Jens Axboe
  2021-10-13 17:22   ` Christoph Hellwig
  2021-10-13 16:49 ` [PATCH 3/4] block: don't bother iter advancing a fully done bio Jens Axboe
  2021-10-13 16:49 ` [PATCH 4/4] block: move update request helpers into blk-mq.c Jens Axboe
  3 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 16:49 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

If we don't use an IO scheduler or have shared tags, then we don't need
to call into this external function at all. This saves ~2% for such
a setup.
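
The shape of the change is the usual inline-fast-path/out-of-line-slow-path
split; schematically (a generic sketch with made-up names, not the actual
block layer code):

	struct ctx { bool cached; };

	bool __get_resource(struct ctx *ctx);	/* slow path, out of line */

	/* in a header: the common case is inlined into every caller */
	static inline bool get_resource(struct ctx *ctx)
	{
		if (ctx->cached)		/* no function call needed */
			return true;
		return __get_resource(ctx);
	}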

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c |  8 +++-----
 block/blk-mq.h | 15 ++++++++++++++-
 2 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 46a91e5fabc5..fe3e926c20a9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1135,7 +1135,7 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool __blk_mq_get_driver_tag(struct request *rq)
+static bool __blk_mq_alloc_driver_tag(struct request *rq)
 {
 	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
@@ -1159,11 +1159,9 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 	return true;
 }
 
-bool blk_mq_get_driver_tag(struct request *rq)
+bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
+	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
 		return false;
 
 	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 8be447995106..ceed0a001c76 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -264,7 +264,20 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
 	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq);
+bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
+
+static inline bool blk_mq_get_driver_tag(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	if (rq->tag != BLK_MQ_NO_TAG &&
+	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+		hctx->tags->rqs[rq->tag] = rq;
+		return true;
+	}
+
+	return __blk_mq_get_driver_tag(hctx, rq);
+}
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
-- 
2.33.0



* [PATCH 3/4] block: don't bother iter advancing a fully done bio
  2021-10-13 16:49 [PATCHSET 0/4] Various block optimizations Jens Axboe
  2021-10-13 16:49 ` [PATCH 1/4] block: provide helpers for rq_list manipulation Jens Axboe
  2021-10-13 16:49 ` [PATCH 2/4] block: inline fast path of driver tag allocation Jens Axboe
@ 2021-10-13 16:49 ` Jens Axboe
  2021-10-13 17:26   ` Christoph Hellwig
  2021-10-13 16:49 ` [PATCH 4/4] block: move update request helpers into blk-mq.c Jens Axboe
  3 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 16:49 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

If we're completing nbytes and nbytes is the full size of the bio, don't
bother calling into the iterator advance helpers. Just clear the bio size
and we're done.
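
With the inline wrapper, a full completion never leaves the caller and only
a partial advance takes the out-of-line path. A sketch of the two cases:

	/* full completion: just zeroes bi_size, fully inline */
	bio_advance(bio, bio->bi_iter.bi_size);

	/* partial completion: still calls __bio_advance() */
	bio_advance(bio, 512);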

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bio.c         |  4 ++--
 include/linux/bio.h | 13 +++++++++++--
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index a3c9ff23a036..874ff235aff7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(submit_bio_wait);
  *
  * @bio will then represent the remaining, uncompleted portion of the io.
  */
-void bio_advance(struct bio *bio, unsigned bytes)
+void __bio_advance(struct bio *bio, unsigned bytes)
 {
 	if (bio_integrity(bio))
 		bio_integrity_advance(bio, bytes);
@@ -1297,7 +1297,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
 	bio_crypt_advance(bio, bytes);
 	bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
-EXPORT_SYMBOL(bio_advance);
+EXPORT_SYMBOL(__bio_advance);
 
 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
 			struct bio *src, struct bvec_iter *src_iter)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 62d684b7dd4c..44b543e7baf6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -119,6 +119,17 @@ static inline void bio_advance_iter_single(const struct bio *bio,
 		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
 }
 
+extern void __bio_advance(struct bio *, unsigned);
+
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+	if (nbytes == bio->bi_iter.bi_size) {
+		bio->bi_iter.bi_size = 0;
+		return;
+	}
+	__bio_advance(bio, nbytes);
+}
+
 #define __bio_for_each_segment(bvl, bio, iter, start)			\
 	for (iter = (start);						\
 	     (iter).bi_size &&						\
@@ -381,8 +392,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
 struct request_queue;
 
 extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
 extern void bio_init(struct bio *bio, struct bio_vec *table,
 		     unsigned short max_vecs);
 extern void bio_uninit(struct bio *);
-- 
2.33.0



* [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 16:49 [PATCHSET 0/4] Various block optimizations Jens Axboe
                   ` (2 preceding siblings ...)
  2021-10-13 16:49 ` [PATCH 3/4] block: don't bother iter advancing a fully done bio Jens Axboe
@ 2021-10-13 16:49 ` Jens Axboe
  2021-10-13 17:32   ` Christoph Hellwig
  3 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 16:49 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

For some reason we still have them in blk-core, with the rest of the
request completion being in blk-mq. That causes an out-of-line call
for each completion.

Move them into blk-mq.c instead.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c | 214 -----------------------------------------------
 block/blk-mq.c   | 214 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 214 insertions(+), 214 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index d5b0258dd218..b199579c5f1f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -166,103 +166,6 @@ inline const char *blk_op_str(unsigned int op)
 }
 EXPORT_SYMBOL_GPL(blk_op_str);
 
-static const struct {
-	int		errno;
-	const char	*name;
-} blk_errors[] = {
-	[BLK_STS_OK]		= { 0,		"" },
-	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
-	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
-	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
-	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
-	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
-	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
-	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
-	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
-	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
-	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
-	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
-
-	/* device mapper special case, should not leak out: */
-	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
-
-	/* zone device specific errors */
-	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
-	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
-
-	/* everything else not covered above: */
-	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
-};
-
-blk_status_t errno_to_blk_status(int errno)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
-		if (blk_errors[i].errno == errno)
-			return (__force blk_status_t)i;
-	}
-
-	return BLK_STS_IOERR;
-}
-EXPORT_SYMBOL_GPL(errno_to_blk_status);
-
-int blk_status_to_errno(blk_status_t status)
-{
-	int idx = (__force int)status;
-
-	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
-		return -EIO;
-	return blk_errors[idx].errno;
-}
-EXPORT_SYMBOL_GPL(blk_status_to_errno);
-
-static void print_req_error(struct request *req, blk_status_t status,
-		const char *caller)
-{
-	int idx = (__force int)status;
-
-	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
-		return;
-
-	printk_ratelimited(KERN_ERR
-		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
-		"phys_seg %u prio class %u\n",
-		caller, blk_errors[idx].name,
-		req->rq_disk ? req->rq_disk->disk_name : "?",
-		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
-		req->cmd_flags & ~REQ_OP_MASK,
-		req->nr_phys_segments,
-		IOPRIO_PRIO_CLASS(req->ioprio));
-}
-
-static void req_bio_endio(struct request *rq, struct bio *bio,
-			  unsigned int nbytes, blk_status_t error)
-{
-	if (error)
-		bio->bi_status = error;
-
-	if (unlikely(rq->rq_flags & RQF_QUIET))
-		bio_set_flag(bio, BIO_QUIET);
-
-	bio_advance(bio, nbytes);
-
-	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
-		/*
-		 * Partial zone append completions cannot be supported as the
-		 * BIO fragments may end up not being written sequentially.
-		 */
-		if (bio->bi_iter.bi_size)
-			bio->bi_status = BLK_STS_IOERR;
-		else
-			bio->bi_iter.bi_sector = rq->__sector;
-	}
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
-		bio_endio(bio);
-}
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
@@ -1305,17 +1208,6 @@ static void update_io_ticks(struct block_device *part, unsigned long now,
 	}
 }
 
-static void blk_account_io_completion(struct request *req, unsigned int bytes)
-{
-	if (req->part && blk_do_io_stat(req)) {
-		const int sgrp = op_stat_group(req_op(req));
-
-		part_stat_lock();
-		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
-		part_stat_unlock();
-	}
-}
-
 void __blk_account_io_done(struct request *req, u64 now)
 {
 	const int sgrp = op_stat_group(req_op(req));
@@ -1424,112 +1316,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_steal_bios);
 
-/**
- * blk_update_request - Complete multiple bytes without completing the request
- * @req:      the request being processed
- * @error:    block status code
- * @nr_bytes: number of bytes to complete for @req
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @req, but doesn't complete
- *     the request structure even if @req doesn't have leftover.
- *     If @req has leftover, sets it up for the next range of segments.
- *
- *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
- *     %false return from this function.
- *
- * Note:
- *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
- *      except in the consistency check at the end of this function.
- *
- * Return:
- *     %false - this request doesn't have any more data
- *     %true  - this request has more data
- **/
-bool blk_update_request(struct request *req, blk_status_t error,
-		unsigned int nr_bytes)
-{
-	int total_bytes;
-
-	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
-
-	if (!req->bio)
-		return false;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
-	    error == BLK_STS_OK)
-		req->q->integrity.profile->complete_fn(req, nr_bytes);
-#endif
-
-	if (unlikely(error && !blk_rq_is_passthrough(req) &&
-		     !(req->rq_flags & RQF_QUIET)))
-		print_req_error(req, error, __func__);
-
-	blk_account_io_completion(req, nr_bytes);
-
-	total_bytes = 0;
-	while (req->bio) {
-		struct bio *bio = req->bio;
-		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
-
-		if (bio_bytes == bio->bi_iter.bi_size)
-			req->bio = bio->bi_next;
-
-		/* Completion has already been traced */
-		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
-		req_bio_endio(req, bio, bio_bytes, error);
-
-		total_bytes += bio_bytes;
-		nr_bytes -= bio_bytes;
-
-		if (!nr_bytes)
-			break;
-	}
-
-	/*
-	 * completely done
-	 */
-	if (!req->bio) {
-		/*
-		 * Reset counters so that the request stacking driver
-		 * can find how many bytes remain in the request
-		 * later.
-		 */
-		req->__data_len = 0;
-		return false;
-	}
-
-	req->__data_len -= total_bytes;
-
-	/* update sector only for requests with clear definition of sector */
-	if (!blk_rq_is_passthrough(req))
-		req->__sector += total_bytes >> 9;
-
-	/* mixed attributes always follow the first bio */
-	if (req->rq_flags & RQF_MIXED_MERGE) {
-		req->cmd_flags &= ~REQ_FAILFAST_MASK;
-		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
-	}
-
-	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
-		/*
-		 * If total number of sectors is less than the first segment
-		 * size, something has gone terribly wrong.
-		 */
-		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
-			blk_dump_rq_flags(req, "request botched");
-			req->__data_len = blk_rq_cur_bytes(req);
-		}
-
-		/* recalculate the number of segments */
-		req->nr_phys_segments = blk_recalc_rq_segments(req);
-	}
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(blk_update_request);
-
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 /**
  * rq_flush_dcache_pages - Helper function to flush all pages in a request
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fe3e926c20a9..069837a020fe 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -626,6 +626,220 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(__blk_mq_end_request);
 
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+	if (req->part && blk_do_io_stat(req)) {
+		const int sgrp = op_stat_group(req_op(req));
+
+		part_stat_lock();
+		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
+		part_stat_unlock();
+	}
+}
+
+static void req_bio_endio(struct request *rq, struct bio *bio,
+			  unsigned int nbytes, blk_status_t error)
+{
+	if (error)
+		bio->bi_status = error;
+
+	if (unlikely(rq->rq_flags & RQF_QUIET))
+		bio_set_flag(bio, BIO_QUIET);
+
+	bio_advance(bio, nbytes);
+
+	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
+		/*
+		 * Partial zone append completions cannot be supported as the
+		 * BIO fragments may end up not being written sequentially.
+		 */
+		if (bio->bi_iter.bi_size)
+			bio->bi_status = BLK_STS_IOERR;
+		else
+			bio->bi_iter.bi_sector = rq->__sector;
+	}
+
+	/* don't actually finish bio if it's part of flush sequence */
+	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
+		bio_endio(bio);
+}
+
+static const struct {
+	int		errno;
+	const char	*name;
+} blk_errors[] = {
+	[BLK_STS_OK]		= { 0,		"" },
+	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
+	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
+	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
+	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
+	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
+	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
+	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
+	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
+	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
+	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
+
+	/* device mapper special case, should not leak out: */
+	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
+
+	/* zone device specific errors */
+	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
+	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
+
+	/* everything else not covered above: */
+	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+		if (blk_errors[i].errno == errno)
+			return (__force blk_status_t)i;
+	}
+
+	return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return -EIO;
+	return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status,
+		const char *caller)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return;
+
+	printk_ratelimited(KERN_ERR
+		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
+		"phys_seg %u prio class %u\n",
+		caller, blk_errors[idx].name,
+		req->rq_disk ? req->rq_disk->disk_name : "?",
+		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
+		req->cmd_flags & ~REQ_OP_MASK,
+		req->nr_phys_segments,
+		IOPRIO_PRIO_CLASS(req->ioprio));
+}
+
+/**
+ * blk_update_request - Complete multiple bytes without completing the request
+ * @req:      the request being processed
+ * @error:    block status code
+ * @nr_bytes: number of bytes to complete for @req
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, but doesn't complete
+ *     the request structure even if @req doesn't have leftover.
+ *     If @req has leftover, sets it up for the next range of segments.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     %false return from this function.
+ *
+ * Note:
+ *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
+ *      except in the consistency check at the end of this function.
+ *
+ * Return:
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
+ **/
+bool blk_update_request(struct request *req, blk_status_t error,
+		unsigned int nr_bytes)
+{
+	int total_bytes;
+
+	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
+
+	if (!req->bio)
+		return false;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+	    error == BLK_STS_OK)
+		req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
+	if (unlikely(error && !blk_rq_is_passthrough(req) &&
+		     !(req->rq_flags & RQF_QUIET)))
+		print_req_error(req, error, __func__);
+
+	blk_account_io_completion(req, nr_bytes);
+
+	total_bytes = 0;
+	while (req->bio) {
+		struct bio *bio = req->bio;
+		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
+
+		if (bio_bytes == bio->bi_iter.bi_size)
+			req->bio = bio->bi_next;
+
+		/* Completion has already been traced */
+		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+		req_bio_endio(req, bio, bio_bytes, error);
+
+		total_bytes += bio_bytes;
+		nr_bytes -= bio_bytes;
+
+		if (!nr_bytes)
+			break;
+	}
+
+	/*
+	 * completely done
+	 */
+	if (!req->bio) {
+		/*
+		 * Reset counters so that the request stacking driver
+		 * can find how many bytes remain in the request
+		 * later.
+		 */
+		req->__data_len = 0;
+		return false;
+	}
+
+	req->__data_len -= total_bytes;
+
+	/* update sector only for requests with clear definition of sector */
+	if (!blk_rq_is_passthrough(req))
+		req->__sector += total_bytes >> 9;
+
+	/* mixed attributes always follow the first bio */
+	if (req->rq_flags & RQF_MIXED_MERGE) {
+		req->cmd_flags &= ~REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
+	}
+
+	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
+		/*
+		 * If total number of sectors is less than the first segment
+		 * size, something has gone terribly wrong.
+		 */
+		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+			blk_dump_rq_flags(req, "request botched");
+			req->__data_len = blk_rq_cur_bytes(req);
+		}
+
+		/* recalculate the number of segments */
+		req->nr_phys_segments = blk_recalc_rq_segments(req);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
 void blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
-- 
2.33.0



* Re: [PATCH 1/4] block: provide helpers for rq_list manipulation
  2021-10-13 16:49 ` [PATCH 1/4] block: provide helpers for rq_list manipulation Jens Axboe
@ 2021-10-13 17:11   ` Christoph Hellwig
  2021-10-13 17:47     ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:11 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, Christoph Hellwig

On Wed, Oct 13, 2021 at 10:49:34AM -0600, Jens Axboe wrote:
> Instead of open-coding the list additions, traversal, and removal,
> provide a basic set of helpers.
> 
> Suggested-by: Christoph Hellwig <hch@infradead.org>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> ---
>  block/blk-mq.c         | 21 +++++----------------
>  include/linux/blk-mq.h | 25 +++++++++++++++++++++++++
>  2 files changed, 30 insertions(+), 16 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 6dfd3aaa6073..46a91e5fabc5 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -426,10 +426,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>  			tag = tag_offset + i;
>  			tags &= ~(1UL << i);
>  			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
> -			rq->rq_next = *data->cached_rq;
> -			*data->cached_rq = rq;
> +			rq_list_add_tail(data->cached_rq, rq);
>  		}

This doesn't seem to match the code in the current for-5.16/block branch.

>  		data->nr_tags -= nr;
> +		return rq_list_pop(data->cached_rq);
>  	} else {

But either way no need for an else after a return.


* Re: [PATCH 2/4] block: inline fast path of driver tag allocation
  2021-10-13 16:49 ` [PATCH 2/4] block: inline fast path of driver tag allocation Jens Axboe
@ 2021-10-13 17:22   ` Christoph Hellwig
  2021-10-13 17:46     ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:22 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block

On Wed, Oct 13, 2021 at 10:49:35AM -0600, Jens Axboe wrote:
> If we don't use an IO scheduler or have shared tags, then we don't need
> to call into this external function at all. This saves ~2% for such
> a setup.

Hmm.  What happens if you just throw an inline tag onto
blk_mq_get_driver_tag?  All the high performance callers should be
in blk-mq.c anyway.  If that isn't enough maybe something like the
version below?

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 38e6651d8b94c..ba9af26d5209d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1126,18 +1126,23 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 	return true;
 }
 
-bool blk_mq_get_driver_tag(struct request *rq)
+static void blk_mq_inc_active_requests(struct request *rq)
+{
+	if (!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		__blk_mq_inc_active_requests(rq->mq_hctx);
+	}
+}
+
+inline bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
 		return false;
 
-	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
-			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
-		rq->rq_flags |= RQF_MQ_INFLIGHT;
-		__blk_mq_inc_active_requests(hctx);
-	}
+	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+		blk_mq_inc_active_requests(rq);
 	hctx->tags->rqs[rq->tag] = rq;
 	return true;
 }


* Re: [PATCH 3/4] block: don't bother iter advancing a fully done bio
  2021-10-13 16:49 ` [PATCH 3/4] block: don't bother iter advancing a fully done bio Jens Axboe
@ 2021-10-13 17:26   ` Christoph Hellwig
  2021-10-13 17:46     ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:26 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block

On Wed, Oct 13, 2021 at 10:49:36AM -0600, Jens Axboe wrote:
> +extern void __bio_advance(struct bio *, unsigned);

No need for the extern, but it would be nice to spell out the argument
names.
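
I.e. something like:

	void __bio_advance(struct bio *bio, unsigned int nbytes);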

> +static inline void bio_advance(struct bio *bio, unsigned int nbytes)
> +{

The kerneldoc comment for bio_advance needs to move here now.


* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 16:49 ` [PATCH 4/4] block: move update request helpers into blk-mq.c Jens Axboe
@ 2021-10-13 17:32   ` Christoph Hellwig
  2021-10-13 17:46     ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:32 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block

On Wed, Oct 13, 2021 at 10:49:37AM -0600, Jens Axboe wrote:
> For some reason we still have them in blk-core, with the rest of the
> request completion being in blk-mq. That causes an out-of-line call
> for each completion.
> 
> Move them into blk-mq.c instead.

The status/errno helpers really are core code.  And if we change
the block_rq_complete tracepoint to just take the status and do the
conversion inside the trace event, we avoid the fast path out of line
call.
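
Something along these lines, with the conversion done in the assign stage
so it only runs when the event is actually enabled (an untested sketch;
the real event carries more fields):

	TRACE_EVENT(block_rq_complete,
		TP_PROTO(struct request *rq, blk_status_t status,
			 unsigned int nr_bytes),
		TP_ARGS(rq, status, nr_bytes),
		TP_STRUCT__entry(
			__field(int, error)
		),
		TP_fast_assign(
			/* only evaluated when the tracepoint is enabled */
			__entry->error = blk_status_to_errno(status);
		),
		TP_printk("error=%d", __entry->error)
	);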


* Re: [PATCH 2/4] block: inline fast path of driver tag allocation
  2021-10-13 17:22   ` Christoph Hellwig
@ 2021-10-13 17:46     ` Jens Axboe
  2021-10-13 17:57       ` Christoph Hellwig
  0 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 17:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:22 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 10:49:35AM -0600, Jens Axboe wrote:
>> If we don't use an IO scheduler or have shared tags, then we don't need
>> to call into this external function at all. This saves ~2% for such
>> a setup.
> 
> Hmm.  What happens if you just throw an inline tag onto
> blk_mq_get_driver_tag?

I'd be surprised if that's any different than my patch in terms of
performance, the fast path would be about the same. I don't feel
strongly about it, can do that instead.

-- 
Jens Axboe



* Re: [PATCH 3/4] block: don't bother iter advancing a fully done bio
  2021-10-13 17:26   ` Christoph Hellwig
@ 2021-10-13 17:46     ` Jens Axboe
  0 siblings, 0 replies; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 17:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:26 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 10:49:36AM -0600, Jens Axboe wrote:
>> +extern void __bio_advance(struct bio *, unsigned);
> 
> No need for the extern, but it would be nice to spell out the argument
> names.

Done

>> +static inline void bio_advance(struct bio *bio, unsigned int nbytes)
>> +{
> 
> The kerneldoc comment for bio_advance needs to move here now.

Ah yes, done.

-- 
Jens Axboe



* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 17:32   ` Christoph Hellwig
@ 2021-10-13 17:46     ` Jens Axboe
  2021-10-13 17:54       ` Christoph Hellwig
  0 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 17:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:32 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 10:49:37AM -0600, Jens Axboe wrote:
>> For some reason we still have them in blk-core, with the rest of the
>> request completion being in blk-mq. That causes an out-of-line call
>> for each completion.
>>
>> Move them into blk-mq.c instead.
> 
> The status/errno helpers really are core code.  And if we change
> the block_rq_complete tracepoint to just take the status and do the
> conversion inside the trace event, we avoid the fast path out of line
> call.

It's all core code at this point imho, there's no request-based code
without mq.

-- 
Jens Axboe



* Re: [PATCH 1/4] block: provide helpers for rq_list manipulation
  2021-10-13 17:11   ` Christoph Hellwig
@ 2021-10-13 17:47     ` Jens Axboe
  0 siblings, 0 replies; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 17:47 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:11 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 10:49:34AM -0600, Jens Axboe wrote:
>> Instead of open-coding the list additions, traversal, and removal,
>> provide a basic set of helpers.
>>
>> Suggested-by: Christoph Hellwig <hch@infradead.org>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>> ---
>>  block/blk-mq.c         | 21 +++++----------------
>>  include/linux/blk-mq.h | 25 +++++++++++++++++++++++++
>>  2 files changed, 30 insertions(+), 16 deletions(-)
>>
>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>> index 6dfd3aaa6073..46a91e5fabc5 100644
>> --- a/block/blk-mq.c
>> +++ b/block/blk-mq.c
>> @@ -426,10 +426,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>>  			tag = tag_offset + i;
>>  			tags &= ~(1UL << i);
>>  			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
>> -			rq->rq_next = *data->cached_rq;
>> -			*data->cached_rq = rq;
>> +			rq_list_add_tail(data->cached_rq, rq);
>>  		}
> 
> This doesn't seem to match the code in the current for-5.6/block branch.

It's on top of the 2-patch improvement series.

>>  		data->nr_tags -= nr;
>> +		return rq_list_pop(data->cached_rq);
>>  	} else {
> 
> But either way no need for an else after a return.

We can kill that, but it's really independent and unrelated to this patch.

-- 
Jens Axboe



* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 17:46     ` Jens Axboe
@ 2021-10-13 17:54       ` Christoph Hellwig
  2021-10-13 17:57         ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:54 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Christoph Hellwig, linux-block

On Wed, Oct 13, 2021 at 11:46:40AM -0600, Jens Axboe wrote:
> On 10/13/21 11:32 AM, Christoph Hellwig wrote:
> > On Wed, Oct 13, 2021 at 10:49:37AM -0600, Jens Axboe wrote:
> >> For some reason we still have them in blk-core, with the rest of the
> >> request completion being in blk-mq. That causes an out-of-line call
> >> for each completion.
> >>
> >> Move them into blk-mq.c instead.
> > 
> > The status/errno helpers really are core code.  And if we change
> > the block_rq_complete tracepoint to just take the status and do the
> > conversion inside the trace event, we avoid the fast path out of line
> > call.
> 
> It's all core code at this point imho, there's no request-based code
> without mq.

But the errno mapping is just as relevant for bio based drivers.
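
E.g. a bio based driver completing with an errno ends up doing something
like this (ret being whatever errno the driver got back):

	bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);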


* Re: [PATCH 2/4] block: inline fast path of driver tag allocation
  2021-10-13 17:46     ` Jens Axboe
@ 2021-10-13 17:57       ` Christoph Hellwig
  2021-10-13 18:07         ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-13 17:57 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Christoph Hellwig, linux-block

On Wed, Oct 13, 2021 at 11:46:04AM -0600, Jens Axboe wrote:
> On 10/13/21 11:22 AM, Christoph Hellwig wrote:
> > On Wed, Oct 13, 2021 at 10:49:35AM -0600, Jens Axboe wrote:
> >> If we don't use an IO scheduler or have shared tags, then we don't need
> >> to call into this external function at all. This saves ~2% for such
> >> a setup.
> > 
> > Hmm.  What happens if you just throw an inline tag onto
> > blk_mq_get_driver_tag?
> 
> I'd be surprised if that's any different than my patch in terms of
> performance, the fast path would be about the same. I don't feel
> strongly about it, can do that instead.

I find the double indirection in your patch a bit confusing.  Not a big
deal if it is actually required, but if we can avoid that I'd prefer
not to add the extra indirection.


* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 17:54       ` Christoph Hellwig
@ 2021-10-13 17:57         ` Jens Axboe
  2021-10-14  5:00           ` Christoph Hellwig
  0 siblings, 1 reply; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 17:57 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:54 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 11:46:40AM -0600, Jens Axboe wrote:
>> On 10/13/21 11:32 AM, Christoph Hellwig wrote:
>>> On Wed, Oct 13, 2021 at 10:49:37AM -0600, Jens Axboe wrote:
>>>> For some reason we still have them in blk-core, with the rest of the
>>>> request completion being in blk-mq. That causes an out-of-line call
>>>> for each completion.
>>>>
>>>> Move them into blk-mq.c instead.
>>>
>>> The status/errno helpers really are core code.  And if we change
>>> the block_rq_complete tracepoint to just take the status and do the
>>> conversion inside the trace event, we avoid the fast path out of line
>>> call.
>>
>> It's all core code at this point imho, there's no request-based code
>> without mq.
> 
> But the errno mapping is just as relevant for bio based drivers.

It's not like they are conditionally enabled, if you get one you get
the other. But I can shuffle it around if it means a lot to you...

-- 
Jens Axboe



* Re: [PATCH 2/4] block: inline fast path of driver tag allocation
  2021-10-13 17:57       ` Christoph Hellwig
@ 2021-10-13 18:07         ` Jens Axboe
  0 siblings, 0 replies; 19+ messages in thread
From: Jens Axboe @ 2021-10-13 18:07 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:57 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 11:46:04AM -0600, Jens Axboe wrote:
>> On 10/13/21 11:22 AM, Christoph Hellwig wrote:
>>> On Wed, Oct 13, 2021 at 10:49:35AM -0600, Jens Axboe wrote:
>>>> If we don't use an IO scheduler or have shared tags, then we don't need
>>>> to call into this external function at all. This saves ~2% for such
>>>> a setup.
>>>
>>> Hmm.  What happens if you just throw an inline tag onto
>>> blk_mq_get_driver_tag?
>>
>> I'd be surprised if that's any different than my patch in terms of
>> performance, the fast path would be about the same. I don't feel
>> strongly about it, can do that instead.
> 
> I find the double indirection in your patch a bit confusing.  Not a big
> deal if it is actually required, but if we can avoid that I'd prefer
> not to add the extra indirection.

Tested the variants, and it does seem to be the best one...

-- 
Jens Axboe



* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-13 17:57         ` Jens Axboe
@ 2021-10-14  5:00           ` Christoph Hellwig
  2021-10-14 15:14             ` Jens Axboe
  0 siblings, 1 reply; 19+ messages in thread
From: Christoph Hellwig @ 2021-10-14  5:00 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Christoph Hellwig, linux-block

On Wed, Oct 13, 2021 at 11:57:51AM -0600, Jens Axboe wrote:
> It's not like they are conditionally enabled, if you get one you get
> the other. But I can shuffle it around if it means a lot to you...

As I've been trying to reshuffle the files and data structures to
keep the bio and request bits clearly separated, I'd appreciate it if we
can keep the status codes in core.c and just do the long overdue move
of the request completion helpers.

And as said before: I think we should fix the tracepoint to not
unconditionally call the status-to-errno conversion first, as not doing
it at all for the fast path will be even faster than inlining it :)


* Re: [PATCH 4/4] block: move update request helpers into blk-mq.c
  2021-10-14  5:00           ` Christoph Hellwig
@ 2021-10-14 15:14             ` Jens Axboe
  0 siblings, 0 replies; 19+ messages in thread
From: Jens Axboe @ 2021-10-14 15:14 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/13/21 11:00 PM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 11:57:51AM -0600, Jens Axboe wrote:
>> It's not like they are conditionally enabled, if you get one you get
>> the other. But I can shuffle it around if it means a lot to you...
> 
> As I've been trying to reshuffle the files and data structures to
> keep the bio and requests bits clearly separated I'd appreciate if we
> can keep the status codes in core.c and just do the long overdue move
> the request completion helers.

Fair enough, I'll leave the error stuff in blk-core; it doesn't really
matter anyway as it should be out-of-line.

> And as said before:  I think we should fix the trace point to not
> unconditionally call the status to errno conversion first as not doing
> it at all for the fast path will be even faster than inlining it :)

I'll do that as separate patch on top.

-- 
Jens Axboe


