From: Johannes Thumshirn <jthumshirn@suse.de>
To: Jens Axboe <axboe@fb.com>
Cc: Hannes Reinecke <hare@suse.de>,
Bart Van Assche <bvanassche@acm.org>,
Christoph Hellwig <hch@lst.de>, Jan Kara <jack@suse.cz>,
Linux Block Layer Mailinglist <linux-block@vger.kernel.org>,
Linux FSDEVEL Mailinglist <linux-fsdevel@vger.kernel.org>,
Jens Axboe <axboe@kernel.dk>,
Johannes Thumshirn <jthumshirn@suse.de>
Subject: [PATCH v2 1/3] block: bio: kill BIO_SEG_VALID flag
Date: Fri, 22 Mar 2019 14:13:44 +0100 [thread overview]
Message-ID: <20190322131346.20169-2-jthumshirn@suse.de> (raw)
In-Reply-To: <20190322131346.20169-1-jthumshirn@suse.de>
From: Jens Axboe <axboe@kernel.dk>
Kill the BIO_SEG_VALID flag. We should just use ->bi_phys_segments to tell
if it's valid or not.
This patch uses -1 to signify it's not.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[ jth: initialize pc bios with 1 phys segment and WARN if we're submitting
bios with -1 segments ]
Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
---
block/bio.c | 15 +++++++++------
block/blk-core.c | 1 +
block/blk-merge.c | 13 ++++++-------
drivers/md/raid5.c | 2 +-
include/linux/blk_types.h | 1 -
5 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 71a78d9fb8b7..a79b2bbcffd0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -280,6 +280,7 @@ void bio_init(struct bio *bio, struct bio_vec *table,
unsigned short max_vecs)
{
memset(bio, 0, sizeof(*bio));
+ bio->bi_phys_segments = -1;
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
@@ -305,6 +306,7 @@ void bio_reset(struct bio *bio)
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
+ bio->bi_phys_segments = -1;
bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
@@ -573,7 +575,7 @@ EXPORT_SYMBOL(bio_put);
int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ if (unlikely(bio->bi_phys_segments == -1))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
@@ -712,7 +714,10 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
- bio->bi_phys_segments++;
+ if (bio->bi_phys_segments == -1)
+ bio->bi_phys_segments = 1;
+ else
+ bio->bi_phys_segments++;
bio->bi_iter.bi_size += len;
/*
@@ -731,7 +736,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
/* If we may be able to merge these biovecs, force a recount */
if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
- bio_clear_flag(bio, BIO_SEG_VALID);
+ bio->bi_phys_segments = -1;
done:
return len;
@@ -1913,10 +1918,8 @@ void bio_trim(struct bio *bio, int offset, int size)
if (offset == 0 && size == bio->bi_iter.bi_size)
return;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
+ bio->bi_phys_segments = -1;
bio_advance(bio, offset << 9);
-
bio->bi_iter.bi_size = size;
if (bio_integrity(bio))
diff --git a/block/blk-core.c b/block/blk-core.c
index 4673ebe42255..53372a16dd7c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1514,6 +1514,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
else if (bio_op(bio) == REQ_OP_DISCARD)
rq->nr_phys_segments = 1;
+ WARN_ON(rq->nr_phys_segments == -1);
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1c9d4f0f96ea..16c2ae69c46b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -343,7 +343,6 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
/* physical segments can be figured out during splitting */
res = split ? split : *bio;
res->bi_phys_segments = nsegs;
- bio_set_flag(res, BIO_SEG_VALID);
if (split) {
/* there isn't chance to merge the splitted bio */
@@ -440,8 +439,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
bio->bi_next = NULL;
bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
bio->bi_next = nxt;
-
- bio_set_flag(bio, BIO_SEG_VALID);
}
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
@@ -620,6 +617,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);
+ if (WARN_ON(nr_phys_segs == -1))
+ nr_phys_segs = 0;
if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
goto no_merge;
@@ -651,9 +650,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
- if (!bio_flagged(req->biotail, BIO_SEG_VALID))
+ if (req->biotail->bi_phys_segments == -1)
blk_recount_segments(q, req->biotail);
- if (!bio_flagged(bio, BIO_SEG_VALID))
+ if (bio->bi_phys_segments == -1)
blk_recount_segments(q, bio);
return ll_new_hw_segment(q, req, bio);
@@ -673,9 +672,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
- if (!bio_flagged(bio, BIO_SEG_VALID))
+ if (bio->bi_phys_segments == -1)
blk_recount_segments(q, bio);
- if (!bio_flagged(req->bio, BIO_SEG_VALID))
+ if (req->bio->bi_phys_segments == -1)
blk_recount_segments(q, req->bio);
return ll_new_hw_segment(q, req, bio);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c033bfcb209e..79eb54dcf0f9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5247,7 +5247,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
bio_set_dev(align_bi, rdev->bdev);
- bio_clear_flag(align_bi, BIO_SEG_VALID);
+ align_bi->bi_phys_segments = -1;
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d66bf5f32610..472059e92071 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,7 +215,6 @@ struct bio {
/*
* bio flags
*/
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
#define BIO_USER_MAPPED 4 /* contains user pages */
--
2.16.4
next prev parent reply other threads:[~2019-03-22 13:14 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-03-22 13:13 [PATCH v2 0/3] add flag for tracking bio allocation Johannes Thumshirn
2019-03-22 13:13 ` Johannes Thumshirn [this message]
2019-03-22 14:01 ` [PATCH v2 1/3] block: bio: kill BIO_SEG_VALID flag Christoph Hellwig
2019-03-25 8:02 ` Johannes Thumshirn
2019-03-22 14:06 ` Hannes Reinecke
2019-03-22 22:00 ` Jens Axboe
2019-03-23 19:31 ` Jens Axboe
2019-03-25 13:32 ` Johannes Thumshirn
2019-03-22 22:40 ` Ming Lei
2019-03-22 13:13 ` [PATCH v2 2/3] block: bio: ensure newly added bio flags don't override BVEC_POOL_IDX Johannes Thumshirn
2019-03-22 14:01 ` Christoph Hellwig
2019-03-22 14:07 ` Hannes Reinecke
2019-03-22 13:13 ` [PATCH v2 3/3] block: bio: introduce BIO_ALLOCED flag and check it in bio_free Johannes Thumshirn
2019-03-22 14:02 ` Christoph Hellwig
2019-03-22 14:05 ` Hannes Reinecke
2019-03-22 21:30 ` Keith Busch
2019-03-22 23:04 ` Ming Lei
2019-03-22 14:10 ` Hannes Reinecke
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190322131346.20169-2-jthumshirn@suse.de \
--to=jthumshirn@suse.de \
--cc=axboe@fb.com \
--cc=axboe@kernel.dk \
--cc=bvanassche@acm.org \
--cc=hare@suse.de \
--cc=hch@lst.de \
--cc=jack@suse.cz \
--cc=linux-block@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).