From: Satya Tangirala <satyat@google.com>
To: "Theodore Y . Ts'o" <tytso@mit.edu>,
	Jaegeuk Kim <jaegeuk@kernel.org>,
	Eric Biggers <ebiggers@kernel.org>, Chao Yu <chao@kernel.org>,
	Jens Axboe <axboe@kernel.dk>,
	"Darrick J . Wong" <darrick.wong@oracle.com>
Cc: linux-kernel@vger.kernel.org, linux-fscrypt@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-xfs@vger.kernel.org, linux-block@vger.kernel.org,
	linux-ext4@vger.kernel.org, Satya Tangirala <satyat@google.com>
Subject: [PATCH v7 1/8] block: ensure bios are not split in middle of crypto data unit
Date: Tue, 17 Nov 2020 14:07:01 +0000
Message-ID: <20201117140708.1068688-2-satyat@google.com>
In-Reply-To: <20201117140708.1068688-1-satyat@google.com>

Introduce blk_crypto_bio_sectors_alignment(), which returns the required
alignment for the number of sectors in a bio. Any bio split must ensure
that the number of sectors in each resulting bio is a multiple of that
returned value. Also update __blk_queue_split(), __blk_queue_bounce() and
blk_crypto_split_bio_if_needed() to respect
blk_crypto_bio_sectors_alignment() when splitting bios.
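
For context, the invariant enforced here is simple: with inline
encryption, a bio's data is encrypted/decrypted in data units of
crypto_cfg.data_unit_size bytes, so a split point expressed in 512-byte
sectors must be a multiple of data_unit_size >> SECTOR_SHIFT. The
following standalone userspace sketch models that rounding rule (the
helper names are illustrative only, not kernel API):

#include <assert.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Sectors per data unit, i.e. the required split alignment. */
static unsigned int bio_sectors_alignment(unsigned int data_unit_size)
{
	return data_unit_size >> SECTOR_SHIFT;
}

/* Round a proposed split point down to the nearest aligned boundary. */
static unsigned int align_split_sectors(unsigned int sectors,
					unsigned int data_unit_size)
{
	unsigned int align = bio_sectors_alignment(data_unit_size);

	return sectors - (sectors % align);	/* round_down(sectors, align) */
}

int main(void)
{
	/* 4096-byte data units: a proposed 13-sector split becomes 8. */
	assert(align_split_sectors(13, 4096) == 8);
	/* 512-byte data units (or no crypt context): nothing changes. */
	assert(align_split_sectors(13, 512) == 13);
	printf("split points stay aligned to the crypto data unit\n");
	return 0;
}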

Signed-off-by: Satya Tangirala <satyat@google.com>
---
 block/bio.c                 |  1 +
 block/blk-crypto-fallback.c | 10 ++--
 block/blk-crypto-internal.h | 18 +++++++
 block/blk-merge.c           | 96 ++++++++++++++++++++++++++++++++-----
 block/blk-mq.c              |  3 ++
 block/bounce.c              |  4 ++
 6 files changed, 117 insertions(+), 15 deletions(-)
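
(Note below the fold: the heart of the blk-merge.c change is the
incremental bookkeeping done by update_aligned_sectors_and_segs(). A
standalone userspace simulation of that bookkeeping follows; the names
and the fixed 8-sector alignment are illustrative, not kernel API.)

#include <stdio.h>

/*
 * Track the largest aligned sector count seen so far, and the number of
 * segments needed to reach it. Mirrors the logic of
 * update_aligned_sectors_and_segs() in the blk-merge.c hunk below.
 */
static void update_aligned(unsigned int nsegs, unsigned int sectors,
			   unsigned int *aligned_segs,
			   unsigned int *aligned_sectors,
			   unsigned int alignment)
{
	if (sectors - *aligned_sectors < alignment)
		return;
	*aligned_sectors = sectors - (sectors % alignment); /* round_down() */
	*aligned_segs = nsegs;
}

int main(void)
{
	/* Three segments of 3, 6 and 7 sectors; required alignment is 8. */
	unsigned int seg_sectors[] = { 3, 6, 7 };
	unsigned int nsegs = 0, sectors = 0;
	unsigned int aligned_segs = 0, aligned_sectors = 0;
	int i;

	for (i = 0; i < 3; i++) {
		nsegs++;
		sectors += seg_sectors[i];
		/* After segment 2 (9 sectors total) the largest aligned
		 * split is 8 sectors, spanning 2 segments. */
		update_aligned(nsegs, sectors, &aligned_segs,
			       &aligned_sectors, 8);
	}
	/* Final state: 16 sectors, already aligned, all 3 segments. */
	printf("aligned_sectors=%u aligned_segs=%u\n",
	       aligned_sectors, aligned_segs);
	return 0;
}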

diff --git a/block/bio.c b/block/bio.c
index fa01bef35bb1..259cef126df3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1472,6 +1472,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
+	WARN_ON(!IS_ALIGNED(sectors, blk_crypto_bio_sectors_alignment(bio)));
 
 	/* Zone append commands cannot be split */
 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index c162b754efbd..db2d2c67b308 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -209,20 +209,22 @@ static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
 static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
+	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
 	unsigned int i = 0;
-	unsigned int num_sectors = 0;
+	unsigned int len = 0;
 	struct bio_vec bv;
 	struct bvec_iter iter;
 
 	bio_for_each_segment(bv, bio, iter) {
-		num_sectors += bv.bv_len >> SECTOR_SHIFT;
+		len += bv.bv_len;
 		if (++i == BIO_MAX_PAGES)
 			break;
 	}
-	if (num_sectors < bio_sectors(bio)) {
+	if (len < bio->bi_iter.bi_size) {
 		struct bio *split_bio;
 
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
+		len = round_down(len, bc->bc_key->crypto_cfg.data_unit_size);
+		split_bio = bio_split(bio, len >> SECTOR_SHIFT, GFP_NOIO, NULL);
 		if (!split_bio) {
 			bio->bi_status = BLK_STS_RESOURCE;
 			return false;
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index 0d36aae538d7..304e90ed99f5 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -60,6 +60,19 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return rq->crypt_ctx;
 }
 
+/*
+ * Returns the alignment requirement for the number of sectors in this bio based
+ * on its bi_crypt_context. Any bios split from this bio must follow this
+ * alignment requirement as well.
+ */
+static inline unsigned int blk_crypto_bio_sectors_alignment(struct bio *bio)
+{
+	if (!bio_has_crypt_ctx(bio))
+		return 1;
+	return bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size >>
+								SECTOR_SHIFT;
+}
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
@@ -93,6 +106,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return false;
 }
 
+static inline unsigned int blk_crypto_bio_sectors_alignment(struct bio *bio)
+{
+	return 1;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bcf5e4580603..f34dda7132f9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,13 +149,15 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
 	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
 	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
+	unsigned int bio_sectors_alignment =
+					blk_crypto_bio_sectors_alignment(bio);
 
 	max_sectors += start_offset;
 	max_sectors &= ~(pbs - 1);
-	if (max_sectors > start_offset)
-		return max_sectors - start_offset;
+	if (max_sectors - start_offset >= bio_sectors_alignment)
+		return round_down(max_sectors - start_offset, bio_sectors_alignment);
 
-	return sectors & ~(lbs - 1);
+	return round_down(sectors & ~(lbs - 1), bio_sectors_alignment);
 }
 
 static inline unsigned get_max_segment_size(const struct request_queue *q,
@@ -174,6 +176,41 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
 			(unsigned long)queue_max_segment_size(q));
 }
 
+/**
+ * update_aligned_sectors_and_segs() - Ensures that *@aligned_sectors is aligned
+ *				       to @bio_sectors_alignment, and that
+ *				       *@aligned_segs is the value of @nsegs
+ *				       when @sectors first reached/exceeded
+ *				       that value of *@aligned_sectors.
+ *
+ * @nsegs: [in] The current number of segs
+ * @sectors: [in] The current number of sectors
+ * @aligned_segs: [in,out] The number of segments that make up @aligned_sectors
+ * @aligned_sectors: [in,out] The largest number of sectors <= @sectors that is
+ *		     aligned to @bio_sectors_alignment
+ * @bio_sectors_alignment: [in] The alignment requirement for the number of
+ *			  sectors
+ *
+ * Updates *@aligned_sectors to the largest number <= @sectors that is also a
+ * multiple of @bio_sectors_alignment. This is done by updating *@aligned_sectors
+ * whenever @sectors is at least @bio_sectors_alignment more than
+ * *@aligned_sectors, since that means we can increment *@aligned_sectors while
+ * still keeping it aligned to @bio_sectors_alignment and also keeping it <=
+ * @sectors. *@aligned_segs is updated to the value of @nsegs when @sectors first
+ * reaches/exceeds any value that causes *@aligned_sectors to be updated.
+ */
+static inline void update_aligned_sectors_and_segs(const unsigned int nsegs,
+						   const unsigned int sectors,
+						   unsigned int *aligned_segs,
+				unsigned int *aligned_sectors,
+				const unsigned int bio_sectors_alignment)
+{
+	if (sectors - *aligned_sectors < bio_sectors_alignment)
+		return;
+	*aligned_sectors = round_down(sectors, bio_sectors_alignment);
+	*aligned_segs = nsegs;
+}
+
 /**
  * bvec_split_segs - verify whether or not a bvec should be split in the middle
  * @q:        [in] request queue associated with the bio associated with @bv
@@ -195,9 +232,12 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
  * the block driver.
  */
 static bool bvec_split_segs(const struct request_queue *q,
-			    const struct bio_vec *bv, unsigned *nsegs,
-			    unsigned *sectors, unsigned max_segs,
-			    unsigned max_sectors)
+			    const struct bio_vec *bv, unsigned int *nsegs,
+			    unsigned int *sectors, unsigned int *aligned_segs,
+			    unsigned int *aligned_sectors,
+			    unsigned int bio_sectors_alignment,
+			    unsigned int max_segs,
+			    unsigned int max_sectors)
 {
 	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
 	unsigned len = min(bv->bv_len, max_len);
@@ -211,6 +251,11 @@ static bool bvec_split_segs(const struct request_queue *q,
 
 		(*nsegs)++;
 		total_len += seg_size;
+		update_aligned_sectors_and_segs(*nsegs,
+						*sectors + (total_len >> 9),
+						aligned_segs,
+						aligned_sectors,
+						bio_sectors_alignment);
 		len -= seg_size;
 
 		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
@@ -235,6 +280,8 @@ static bool bvec_split_segs(const struct request_queue *q,
  * following is guaranteed for the cloned bio:
  * - That it has at most get_max_io_size(@q, @bio) sectors.
  * - That it has at most queue_max_segments(@q) segments.
+ * - That the number of sectors in the returned bio is aligned to
+ *   blk_crypto_bio_sectors_alignment(@bio).
  *
  * Except for discard requests the cloned bio will point at the bi_io_vec of
  * the original bio. It is the responsibility of the caller to ensure that the
@@ -252,6 +299,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	unsigned nsegs = 0, sectors = 0;
 	const unsigned max_sectors = get_max_io_size(q, bio);
 	const unsigned max_segs = queue_max_segments(q);
+	const unsigned int bio_sectors_alignment =
+					blk_crypto_bio_sectors_alignment(bio);
+	unsigned int aligned_segs = 0, aligned_sectors = 0;
 
 	bio_for_each_bvec(bv, bio, iter) {
 		/*
@@ -266,8 +316,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 			nsegs++;
 			sectors += bv.bv_len >> 9;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
-					 max_sectors)) {
+			update_aligned_sectors_and_segs(nsegs, sectors,
+							&aligned_segs,
+							&aligned_sectors,
+							bio_sectors_alignment);
+		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+					   &aligned_segs, &aligned_sectors,
+					   bio_sectors_alignment, max_segs,
+					   max_sectors)) {
 			goto split;
 		}
 
@@ -275,11 +331,24 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bvprvp = &bvprv;
 	}
 
+	/*
+	 * The input bio's number of sectors is assumed to be aligned to
+	 * bio_sectors_alignment. If that's the case, then whenever the bio
+	 * is not going to be split, this function should ensure that
+	 * aligned_segs == nsegs and aligned_sectors == sectors.
+	 */
+	WARN_ON(aligned_segs != nsegs || aligned_sectors != sectors);
 	*segs = nsegs;
 	return NULL;
 split:
-	*segs = nsegs;
-	return bio_split(bio, sectors, GFP_NOIO, bs);
+	*segs = aligned_segs;
+	if (WARN_ON(aligned_sectors == 0))
+		goto err;
+	return bio_split(bio, aligned_sectors, GFP_NOIO, bs);
+err:
+	bio->bi_status = BLK_STS_IOERR;
+	bio_endio(bio);
+	return bio;
 }
 
 /**
@@ -366,6 +435,9 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 {
 	unsigned int nr_phys_segs = 0;
 	unsigned int nr_sectors = 0;
+	unsigned int nr_aligned_phys_segs = 0;
+	unsigned int nr_aligned_sectors = 0;
+	unsigned int bio_sectors_alignment;
 	struct req_iterator iter;
 	struct bio_vec bv;
 
@@ -381,9 +453,11 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 		return 1;
 	}
 
+	bio_sectors_alignment = blk_crypto_bio_sectors_alignment(rq->bio);
 	rq_for_each_bvec(bv, rq, iter)
 		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
-				UINT_MAX, UINT_MAX);
+				&nr_aligned_phys_segs, &nr_aligned_sectors,
+				bio_sectors_alignment, UINT_MAX, UINT_MAX);
 	return nr_phys_segs;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 55bcee5dc032..de5c97ab8e5a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2161,6 +2161,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 	blk_queue_bounce(q, &bio);
 	__blk_queue_split(&bio, &nr_segs);
 
+	if (bio->bi_status != BLK_STS_OK)
+		goto queue_exit;
+
 	if (!bio_integrity_prep(bio))
 		goto queue_exit;
 
diff --git a/block/bounce.c b/block/bounce.c
index 162a6eee8999..b15224799008 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -295,6 +295,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	bool bounce = false;
 	int sectors = 0;
 	bool passthrough = bio_is_passthrough(*bio_orig);
+	unsigned int bio_sectors_alignment;
 
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_PAGES)
@@ -305,6 +306,9 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bounce)
 		return;
 
+	bio_sectors_alignment = blk_crypto_bio_sectors_alignment(bio);
+	sectors = round_down(sectors, bio_sectors_alignment);
+
 	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
 		bio_chain(bio, *bio_orig);
-- 
2.29.2.299.gdc1121823c-goog

