From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: Mike Snitzer <snitzer@redhat.com>,
	linux-mm@kvack.org, dm-devel@redhat.com,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	"Darrick J . Wong" <darrick.wong@oracle.com>,
	Omar Sandoval <osandov@fb.com>,
	cluster-devel@redhat.com, linux-ext4@vger.kernel.org,
	Shaohua Li <shli@kernel.org>,
	Kent Overstreet <kent.overstreet@gmail.com>,
	Boaz Harrosh <ooo@electrozaur.com>,
	Gao Xiang <gaoxiang25@huawei.com>, Coly Li <colyli@suse.de>,
	linux-raid@vger.kernel.org, Bob Peterson <rpeterso@redhat.com>,
	linux-bcache@vger.kernel.org,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Dave Chinner <dchinner@redhat.com>,
	David Sterba <dsterba@suse.com>, Ming Lei <ming.lei@redhat.com>,
	linux-block@vger.kernel.org, Theodore Ts'o <tytso@mit.edu>,
	linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-btrfs@vger.kernel.org
Subject: [PATCH V12 19/20] block: kill QUEUE_FLAG_NO_SG_MERGE
Date: Mon, 26 Nov 2018 10:17:19 +0800	[thread overview]
Message-ID: <20181126021720.19471-20-ming.lei@redhat.com> (raw)
In-Reply-To: <20181126021720.19471-1-ming.lei@redhat.com>

Since commit bdced438acd83ad83a6c ("block: setup bi_phys_segments after splitting"),
the physical segment count has been computed in blk_queue_split() for the fast
path, and BIO_SEG_VALID is set there as well.

Now only blk_recount_segments() and blk_recalc_rq_segments() still check
QUEUE_FLAG_NO_SG_MERGE.

blk_recount_segments() is effectively bypassed in the fast path because
BIO_SEG_VALID is already set by blk_queue_split().
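
The consumer side can be sketched roughly as follows (the helper below is a
hypothetical wrapper for illustration only, not an upstream function; only
bio_flagged(), blk_recount_segments() and bi_phys_segments are real):

	/* assumes <linux/bio.h> and <linux/blkdev.h> */
	static void ensure_seg_count(struct request_queue *q, struct bio *bio)
	{
		/* slow path only: bios split in blk_queue_split() already carry the flag */
		if (!bio_flagged(bio, BIO_SEG_VALID))
			blk_recount_segments(q, bio);

		/* bio->bi_phys_segments is valid from here on */
	}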

As for the other user, blk_recalc_rq_segments(), it only runs in the following
places (a minimal sketch of both call sites follows this list):

- the partial-completion branch of blk_update_request(), which is an unusual case

- blk_cloned_rq_check_limits(), where removing the flag is still not a big
problem since dm-rq is the only user
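
A minimal, illustrative sketch of those two call sites (simplified, not a
verbatim copy of blk_update_request() or blk_cloned_rq_check_limits(); the
wrapper names are made up): both simply re-derive rq->nr_phys_segments from the
bios still attached to the request, which behaves the same with or without the
queue flag.

	/* assumes <linux/blkdev.h> */
	static void partial_completion_path(struct request *req)
	{
		/* blk_update_request(): some bytes completed, bios still attached */
		if (req->bio)
			blk_recalc_rq_segments(req);	/* recount what is left */
	}

	static int cloned_rq_limits_check(struct request *rq)
	{
		/* blk_cloned_rq_check_limits() path (dm-rq): revalidate before dispatch */
		blk_recalc_rq_segments(rq);
		if (rq->nr_phys_segments > queue_max_segments(rq->q))
			return -EIO;
		return 0;
	}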

Multi-page bvec support is enabled now, and not doing S/G merging is rather
pointless with the current setup of the I/O path, as it isn't going to save a
significant number of cycles.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-merge.c      | 31 ++++++-------------------------
 block/blk-mq-debugfs.c |  1 -
 block/blk-mq.c         |  3 ---
 drivers/md/dm-table.c  | 13 -------------
 include/linux/blkdev.h |  1 -
 5 files changed, 6 insertions(+), 43 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 20b5b0c3e182..9a7fd8b1f90a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -355,8 +355,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 EXPORT_SYMBOL(blk_queue_split);
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio,
-					     bool no_sg_merge)
+					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
 	unsigned int seg_size, nr_phys_segs;
@@ -382,13 +381,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_bvec(bv, bio, iter) {
-			/*
-			 * If SG merging is disabled, each bio vector is
-			 * a segment
-			 */
-			if (no_sg_merge)
-				goto new_segment;
-
 			if (prev) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
@@ -418,27 +410,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&rq->q->queue_flags);
-
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
-			no_sg_merge);
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	unsigned short seg_cnt = bio_segments(bio);
-
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
-			(seg_cnt < queue_max_segments(q)))
-		bio->bi_phys_segments = seg_cnt;
-	else {
-		struct bio *nxt = bio->bi_next;
+	struct bio *nxt = bio->bi_next;
 
-		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
-		bio->bi_next = nxt;
-	}
+	bio->bi_next = NULL;
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+	bio->bi_next = nxt;
 
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index a32bb79d6c95..d752fe4461af 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -127,7 +127,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SAME_FORCE),
 	QUEUE_FLAG_NAME(DEAD),
 	QUEUE_FLAG_NAME(INIT_DONE),
-	QUEUE_FLAG_NAME(NO_SG_MERGE),
 	QUEUE_FLAG_NAME(POLL),
 	QUEUE_FLAG_NAME(WC),
 	QUEUE_FLAG_NAME(FUA),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b16204df65d1..7b17191d755b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2780,9 +2780,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
-	if (!(set->flags & BLK_MQ_F_SG_MERGE))
-		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
 	q->sg_reserved_size = INT_MAX;
 
 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 844f7d0f2ef8..a41832cf0c98 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1698,14 +1698,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
-				   sector_t start, sector_t len, void *data)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
-}
-
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1902,11 +1894,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
-	else
-		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
 	dm_table_verify_integrity(t);
 
 	/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fa263de3f1d1..9c1ae3a62ed3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -589,7 +589,6 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  15	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        16	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   17	/* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 18	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL	       19	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC	       20	/* Write back caching */
 #define QUEUE_FLAG_FUA	       21	/* device supports FUA writes */
-- 
2.9.5
