linux-block.vger.kernel.org archive mirror
* simplify the blk_rq_map* implementation and reclaim two bio flags
@ 2020-08-27 15:37 Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 1/4] block: remove the BIO_NULL_MAPPED flag Christoph Hellwig
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Christoph Hellwig @ 2020-08-27 15:37 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Hi Jens,

this series cleans up some of the passthrough remapping code, and as
part of that reclaims two bio flags.

* [PATCH 1/4] block: remove the BIO_NULL_MAPPED flag
  2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
@ 2020-08-27 15:37 ` Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 2/4] block: remove __blk_rq_unmap_user Christoph Hellwig
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2020-08-27 15:37 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

We can simply use a boolean flag in the bio_map_data structure
instead.
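
As a toy user-space illustration (the struct layout mirrors the hunk
below; everything else here is a hypothetical stand-in, not kernel
code), the two booleans pack into single-bit bitfields, and the second
one takes over the job of the BIO_NULL_MAPPED bio flag:

	#include <stdbool.h>
	#include <stdio.h>

	struct bio_map_data {
		bool is_our_pages : 1;
		bool is_null_mapped : 1;
	};

	int main(void)
	{
		struct bio_map_data bmd = { .is_our_pages = true };

		bmd.is_null_mapped = true;	/* replaces bio_set_flag(bio, BIO_NULL_MAPPED) */
		if (!bmd.is_null_mapped)
			puts("would copy the data back to user space");
		return 0;
	}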

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-map.c           | 9 +++++----
 include/linux/blk_types.h | 1 -
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 6e804892d5ec6a..51e6195f878d3c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -12,7 +12,8 @@
 #include "blk.h"
 
 struct bio_map_data {
-	int is_our_pages;
+	bool is_our_pages : 1;
+	bool is_null_mapped : 1;
 	struct iov_iter iter;
 	struct iovec iov[];
 };
@@ -108,7 +109,7 @@ static int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret = 0;
 
-	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+	if (!bmd || !bmd->is_null_mapped) {
 		/*
 		 * if we're in a workqueue, the request is orphaned, so
 		 * don't copy into a random user address space, just free
@@ -158,7 +159,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 	 * The caller provided iov might point to an on-stack or otherwise
 	 * shortlived one.
 	 */
-	bmd->is_our_pages = map_data ? 0 : 1;
+	bmd->is_our_pages = !map_data;
 
 	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
 	if (nr_pages > BIO_MAX_PAGES)
@@ -234,7 +235,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 
 	bio->bi_private = bmd;
 	if (map_data && map_data->null_mapped)
-		bio_set_flag(bio, BIO_NULL_MAPPED);
+		bmd->is_null_mapped = true;
 	return bio;
 cleanup:
 	if (!map_data)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4ecf4fed171f0d..3d1bd8dad69baf 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -256,7 +256,6 @@ enum {
 	BIO_CLONED,		/* doesn't own data */
 	BIO_BOUNCED,		/* bio is a bounce bio */
 	BIO_USER_MAPPED,	/* contains user pages */
-	BIO_NULL_MAPPED,	/* contains invalid user pages */
 	BIO_WORKINGSET,		/* contains userspace workingset pages */
 	BIO_QUIET,		/* Make BIO Quiet */
 	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
-- 
2.28.0


* [PATCH 2/4] block: remove __blk_rq_unmap_user
  2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 1/4] block: remove the BIO_NULL_MAPPED flag Christoph Hellwig
@ 2020-08-27 15:37 ` Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 3/4] block: remove __blk_rq_map_user_iov Christoph Hellwig
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2020-08-27 15:37 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Open code __blk_rq_unmap_user in its two callers.  Neither of them ever
passes a NULL bio, and one of them can use an existing local variable
instead of the bio flag.
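
A toy user-space sketch of the pattern (the function names are
hypothetical stand-ins mirroring the kernel ones, not the real
functions): once the caller already holds the copy/map decision in a
local variable, the old helper's per-bio flag test and NULL check carry
no information and can be open coded away:

	#include <stdio.h>

	static void bio_unmap_user(void)  { puts("unmap"); }	/* void, cannot fail */
	static int  bio_uncopy_user(void) { puts("uncopy"); return 0; }

	int main(void)
	{
		int copy = 1;	/* the decision is already in a local variable */
		int ret = 0;

		if (copy)
			ret = bio_uncopy_user();
		else
			bio_unmap_user();
		return ret;
	}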

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-map.c | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 51e6195f878d3c..10de4809edf9a7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -558,20 +558,6 @@ int blk_rq_append_bio(struct request *rq, struct bio **bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
-static int __blk_rq_unmap_user(struct bio *bio)
-{
-	int ret = 0;
-
-	if (bio) {
-		if (bio_flagged(bio, BIO_USER_MAPPED))
-			bio_unmap_user(bio);
-		else
-			ret = bio_uncopy_user(bio);
-	}
-
-	return ret;
-}
-
 static int __blk_rq_map_user_iov(struct request *rq,
 		struct rq_map_data *map_data, struct iov_iter *iter,
 		gfp_t gfp_mask, bool copy)
@@ -599,7 +585,10 @@ static int __blk_rq_map_user_iov(struct request *rq,
 	 */
 	ret = blk_rq_append_bio(rq, &bio);
 	if (ret) {
-		__blk_rq_unmap_user(orig_bio);
+		if (copy)
+			bio_uncopy_user(orig_bio);
+		else
+			bio_unmap_user(orig_bio);
 		return ret;
 	}
 	bio_get(bio);
@@ -701,9 +690,13 @@ int blk_rq_unmap_user(struct bio *bio)
 		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
 
-		ret2 = __blk_rq_unmap_user(mapped_bio);
-		if (ret2 && !ret)
-			ret = ret2;
+		if (bio_flagged(mapped_bio, BIO_USER_MAPPED)) {
+			bio_unmap_user(mapped_bio);
+		} else {
+			ret2 = bio_uncopy_user(mapped_bio);
+			if (ret2 && !ret)
+				ret = ret2;
+		}
 
 		mapped_bio = bio;
 		bio = bio->bi_next;
-- 
2.28.0


* [PATCH 3/4] block: remove __blk_rq_map_user_iov
  2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 1/4] block: remove the BIO_NULL_MAPPED flag Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 2/4] block: remove __blk_rq_unmap_user Christoph Hellwig
@ 2020-08-27 15:37 ` Christoph Hellwig
  2020-08-27 15:37 ` [PATCH 4/4] block: remove the BIO_USER_MAPPED flag Christoph Hellwig
  2020-08-29 16:49 ` simplify the blk_rq_map* implementation and reclaim two bio flags Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2020-08-27 15:37 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Just duplicate a small amount of code in the two low-level routines
that map into the bio and copy to the bio.  This leads to code that is
much easier to follow and maintain, and to better shared error
handling.
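
A toy user-space model of the new shape (hypothetical stand-ins
mirroring the kernel names, not kernel code): each low-level builder
attaches its bio to the request itself and handles its own failure
cleanup, so the former wrapper has nothing left to do and can go away:

	#include <stdio.h>

	struct request { int nr_bios; };

	static int blk_rq_append_bio(struct request *rq)
	{
		rq->nr_bios++;	/* the real call can fail; see the hunks below */
		return 0;
	}

	static int bio_copy_user_iov(struct request *rq)
	{
		return blk_rq_append_bio(rq);	/* on failure: free pages, put bio */
	}

	static int bio_map_user_iov(struct request *rq)
	{
		return blk_rq_append_bio(rq);	/* on failure: release pages, put bio */
	}

	int main(void)
	{
		struct request rq = { 0 };
		int copy = 1;
		int ret = copy ? bio_copy_user_iov(&rq) : bio_map_user_iov(&rq);

		printf("ret=%d nr_bios=%d\n", ret, rq.nr_bios);
		return ret;
	}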

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-map.c | 144 ++++++++++++++++++------------------------------
 1 file changed, 54 insertions(+), 90 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 10de4809edf9a7..427962ac2f675f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -127,24 +127,12 @@ static int bio_uncopy_user(struct bio *bio)
 	return ret;
 }
 
-/**
- *	bio_copy_user_iov	-	copy user data to bio
- *	@q:		destination block queue
- *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
- *	@iter:		iovec iterator
- *	@gfp_mask:	memory allocation flags
- *
- *	Prepares and returns a bio for indirect user io, bouncing data
- *	to/from kernel pages as necessary. Must be paired with
- *	call bio_uncopy_user() on io completion.
- */
-static struct bio *bio_copy_user_iov(struct request_queue *q,
-		struct rq_map_data *map_data, struct iov_iter *iter,
-		gfp_t gfp_mask)
+static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
+		struct iov_iter *iter, gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
 	struct page *page;
-	struct bio *bio;
+	struct bio *bio, *bounce_bio;
 	int i = 0, ret;
 	int nr_pages;
 	unsigned int len = iter->count;
@@ -152,7 +140,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 
 	bmd = bio_alloc_map_data(iter, gfp_mask);
 	if (!bmd)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	/*
 	 * We need to do a deep copy of the iov_iter including the iovecs.
@@ -169,8 +157,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		goto out_bmd;
-
-	ret = 0;
+	bio->bi_opf |= req_op(rq);
 
 	if (map_data) {
 		nr_pages = 1 << map_data->page_order;
@@ -187,7 +174,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (map_data) {
 			if (i == map_data->nr_entries * nr_pages) {
 				ret = -ENOMEM;
-				break;
+				goto cleanup;
 			}
 
 			page = map_data->pages[i / nr_pages];
@@ -195,14 +182,14 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 
 			i++;
 		} else {
-			page = alloc_page(q->bounce_gfp | gfp_mask);
+			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
 			if (!page) {
 				ret = -ENOMEM;
-				break;
+				goto cleanup;
 			}
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
 			if (!map_data)
 				__free_page(page);
 			break;
@@ -212,9 +199,6 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 		offset = 0;
 	}
 
-	if (ret)
-		goto cleanup;
-
 	if (map_data)
 		map_data->offset += bio->bi_iter.bi_size;
 
@@ -236,39 +220,42 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 	bio->bi_private = bmd;
 	if (map_data && map_data->null_mapped)
 		bmd->is_null_mapped = true;
-	return bio;
+
+	bounce_bio = bio;
+	ret = blk_rq_append_bio(rq, &bounce_bio);
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * We link the bounce buffer in and could have to traverse it later, so
+	 * we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bounce_bio);
+	return 0;
 cleanup:
 	if (!map_data)
 		bio_free_pages(bio);
 	bio_put(bio);
 out_bmd:
 	kfree(bmd);
-	return ERR_PTR(ret);
+	return ret;
 }
 
-/**
- *	bio_map_user_iov - map user iovec into bio
- *	@q:		the struct request_queue for the bio
- *	@iter:		iovec iterator
- *	@gfp_mask:	memory allocation flags
- *
- *	Map the user space address into a bio suitable for io to a block
- *	device. Returns an error pointer in case of error.
- */
-static struct bio *bio_map_user_iov(struct request_queue *q,
-		struct iov_iter *iter, gfp_t gfp_mask)
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+		gfp_t gfp_mask)
 {
-	unsigned int max_sectors = queue_max_hw_sectors(q);
-	int j;
-	struct bio *bio;
+	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	struct bio *bio, *bounce_bio;
 	int ret;
+	int j;
 
 	if (!iov_iter_count(iter))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
 	if (!bio)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
+	bio->bi_opf |= req_op(rq);
 
 	while (iov_iter_count(iter)) {
 		struct page **pages;
@@ -284,7 +271,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
 
 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
 
-		if (unlikely(offs & queue_dma_alignment(q))) {
+		if (unlikely(offs & queue_dma_alignment(rq->q))) {
 			ret = -EINVAL;
 			j = 0;
 		} else {
@@ -296,7 +283,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
 				if (n > bytes)
 					n = bytes;
 
-				if (!bio_add_hw_page(q, bio, page, n, offs,
+				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
 						     max_sectors, &same_page)) {
 					if (same_page)
 						put_page(page);
@@ -323,18 +310,30 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
 	bio_set_flag(bio, BIO_USER_MAPPED);
 
 	/*
-	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
-	 * it would normally disappear when its bi_end_io is run.
-	 * however, we need it for the unmap, so grab an extra
-	 * reference to it
+	 * Subtle: if we end up needing to bounce a bio, it would normally
+	 * disappear when its bi_end_io is run.  However, we need the original
+	 * bio for the unmap, so grab an extra reference to it
 	 */
 	bio_get(bio);
-	return bio;
 
+	bounce_bio = bio;
+	ret = blk_rq_append_bio(rq, &bounce_bio);
+	if (ret)
+		goto out_put_orig;
+
+	/*
+	 * We link the bounce buffer in and could have to traverse it
+	 * later, so we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bounce_bio);
+	return 0;
+
+ out_put_orig:
+	bio_put(bio);
  out_unmap:
 	bio_release_pages(bio, false);
 	bio_put(bio);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 /**
@@ -558,44 +557,6 @@ int blk_rq_append_bio(struct request *rq, struct bio **bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
-static int __blk_rq_map_user_iov(struct request *rq,
-		struct rq_map_data *map_data, struct iov_iter *iter,
-		gfp_t gfp_mask, bool copy)
-{
-	struct request_queue *q = rq->q;
-	struct bio *bio, *orig_bio;
-	int ret;
-
-	if (copy)
-		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-	else
-		bio = bio_map_user_iov(q, iter, gfp_mask);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	bio->bi_opf &= ~REQ_OP_MASK;
-	bio->bi_opf |= req_op(rq);
-
-	orig_bio = bio;
-
-	/*
-	 * We link the bounce buffer in and could have to traverse it
-	 * later so we have to get a ref to prevent it from being freed
-	 */
-	ret = blk_rq_append_bio(rq, &bio);
-	if (ret) {
-		if (copy)
-			bio_uncopy_user(orig_bio);
-		else
-			bio_unmap_user(orig_bio);
-		return ret;
-	}
-	bio_get(bio);
-
-	return 0;
-}
-
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:		request queue where request should be inserted
@@ -639,7 +600,10 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 
 	i = *iter;
 	do {
-		ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+		if (copy)
+			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
+		else
+			ret = bio_map_user_iov(rq, &i, gfp_mask);
 		if (ret)
 			goto unmap_rq;
 		if (!bio)
-- 
2.28.0


* [PATCH 4/4] block: remove the BIO_USER_MAPPED flag
  2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
                   ` (2 preceding siblings ...)
  2020-08-27 15:37 ` [PATCH 3/4] block: remove __blk_rq_map_user_iov Christoph Hellwig
@ 2020-08-27 15:37 ` Christoph Hellwig
  2020-08-29 16:49 ` simplify the blk_rq_map* implementation and reclaim two bio flags Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2020-08-27 15:37 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Just check whether the bio has private data: if it does, it must have
originated from bio_copy_user_iov.
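
A toy user-space illustration (hypothetical stand-ins mirroring the
kernel names, not kernel code): only the copy path ever stores a
bio_map_data pointer in bi_private, so a non-NULL pointer identifies
copied bios without a dedicated flag bit:

	#include <stdbool.h>
	#include <stdio.h>

	struct bio { void *bi_private; };

	static bool bio_was_copied(const struct bio *bio)
	{
		return bio->bi_private != NULL;	/* set only by the copy path */
	}

	int main(void)
	{
		int bmd;	/* stand-in for struct bio_map_data */
		struct bio copied = { .bi_private = &bmd };
		struct bio mapped = { .bi_private = NULL };

		printf("copied=%d mapped=%d\n",
		       bio_was_copied(&copied), bio_was_copied(&mapped));
		return 0;
	}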

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-map.c           | 10 ++++------
 include/linux/blk_types.h |  1 -
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 427962ac2f675f..be118926ccf4e3 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -109,7 +109,7 @@ static int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret = 0;
 
-	if (!bmd || !bmd->is_null_mapped) {
+	if (!bmd->is_null_mapped) {
 		/*
 		 * if we're in a workqueue, the request is orphaned, so
 		 * don't copy into a random user address space, just free
@@ -307,8 +307,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			break;
 	}
 
-	bio_set_flag(bio, BIO_USER_MAPPED);
-
 	/*
 	 * Subtle: if we end up needing to bounce a bio, it would normally
 	 * disappear when its bi_end_io is run.  However, we need the original
@@ -654,12 +652,12 @@ int blk_rq_unmap_user(struct bio *bio)
 		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
 
-		if (bio_flagged(mapped_bio, BIO_USER_MAPPED)) {
-			bio_unmap_user(mapped_bio);
-		} else {
+		if (bio->bi_private) {
 			ret2 = bio_uncopy_user(mapped_bio);
 			if (ret2 && !ret)
 				ret = ret2;
+		} else {
+			bio_unmap_user(mapped_bio);
 		}
 
 		mapped_bio = bio;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3d1bd8dad69baf..39b1ba6da9ef71 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -255,7 +255,6 @@ enum {
 	BIO_NO_PAGE_REF,	/* don't put release vec pages */
 	BIO_CLONED,		/* doesn't own data */
 	BIO_BOUNCED,		/* bio is a bounce bio */
-	BIO_USER_MAPPED,	/* contains user pages */
 	BIO_WORKINGSET,		/* contains userspace workingset pages */
 	BIO_QUIET,		/* Make BIO Quiet */
 	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
-- 
2.28.0


* Re: simplify the blk_rq_map* implementation and reclaim two bio flags
  2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
                   ` (3 preceding siblings ...)
  2020-08-27 15:37 ` [PATCH 4/4] block: remove the BIO_USER_MAPPED flag Christoph Hellwig
@ 2020-08-29 16:49 ` Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2020-08-29 16:49 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 8/27/20 9:37 AM, Christoph Hellwig wrote:
> Hi Jens,
> 
> this series cleans up some of the passthrough remapping code, and as
> part of that reclaims two bio flags.

Applied, thanks.

-- 
Jens Axboe


Thread overview: 6+ messages
2020-08-27 15:37 simplify the blk_rq_map* implementation and reclaim two bio flags Christoph Hellwig
2020-08-27 15:37 ` [PATCH 1/4] block: remove the BIO_NULL_MAPPED flag Christoph Hellwig
2020-08-27 15:37 ` [PATCH 2/4] block: remove __blk_rq_unmap_user Christoph Hellwig
2020-08-27 15:37 ` [PATCH 3/4] block: remove __blk_rq_map_user_iov Christoph Hellwig
2020-08-27 15:37 ` [PATCH 4/4] block: remove the BIO_USER_MAPPED flag Christoph Hellwig
2020-08-29 16:49 ` simplify the blk_rq_map* implementation and reclaim two bio flags Jens Axboe
