From: Mike Snitzer <snitzer@redhat.com>
To: axboe@kernel.dk
Cc: ming.lei@redhat.com, hch@lst.de, dm-devel@redhat.com,
	linux-block@vger.kernel.org
Subject: [PATCH v2 2/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset
Date: Wed, 23 Mar 2022 15:45:22 -0400	[thread overview]
Message-ID: <20220323194524.5900-3-snitzer@kernel.org> (raw)
In-Reply-To: <20220323194524.5900-1-snitzer@kernel.org>

Add REQ_ALLOC_CACHE and set it in the %opf passed to bio_alloc_bioset to
inform bio_alloc_bioset (and any stacked block drivers) that the bio
should be allocated from the respective bioset's per-cpu alloc cache if
possible.

This decouples access control to the alloc cache (via REQ_ALLOC_CACHE)
from actual participation in a specific alloc cache (BIO_PERCPU_CACHE).
Without this, an upper layer's bioset may not have an alloc cache, in
which case the bio issued to the underlying device(s) wouldn't convey
that allocating from an alloc cache is warranted (when one is available).
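
For illustration, a minimal sketch of how a caller (e.g. a stacked
driver) might opt in; the function name and the op/gfp values are
examples only, not part of this patch. The gating on bs->cache and
BIO_INLINE_VECS is done inside bio_alloc_bioset(), per the hunk below:

  /*
   * Example only: ask bio_alloc_bioset() for a bio backed by the
   * bioset's per-cpu alloc cache.  It falls back to the regular
   * mempool path if the bioset has no cache or nr_vecs exceeds
   * BIO_INLINE_VECS.
   */
  static struct bio *example_alloc_cached(struct block_device *bdev,
  					  struct bio_set *bs)
  {
  	unsigned int opf = REQ_OP_WRITE | REQ_ALLOC_CACHE;

  	return bio_alloc_bioset(bdev, 0, opf, GFP_NOIO, bs);
  }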

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
---
 block/bio.c               | 33 ++++++++++++++++++++-------------
 include/linux/bio.h       |  4 +++-
 include/linux/blk_types.h |  4 +++-
 3 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index a7633aa82d7d..0b65ea241f54 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -440,11 +440,7 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 		return bio;
 	}
 	put_cpu();
-	bio = bio_alloc_bioset(bdev, nr_vecs, opf, gfp, bs);
-	if (!bio)
-		return NULL;
-	bio_set_flag(bio, BIO_PERCPU_CACHE);
-	return bio;
+	return NULL;
 }
 
 /**
@@ -488,11 +484,24 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 	gfp_t saved_gfp = gfp_mask;
 	struct bio *bio;
 	void *p;
+	bool use_alloc_cache;
 
 	/* should not use nobvec bioset for nr_vecs > 0 */
 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 		return NULL;
 
+	use_alloc_cache = (bs->cache && (opf & REQ_ALLOC_CACHE) &&
+			   nr_vecs <= BIO_INLINE_VECS);
+	if (use_alloc_cache) {
+		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, gfp_mask, bs);
+		if (bio)
+			return bio;
+		/*
+		 * No cached bio is available; mark the bio returned below to
+		 * participate in the per-cpu alloc cache.
+		 */
+	}
+
 	/*
 	 * submit_bio_noacct() converts recursion to iteration; this means if
 	 * we're running beneath it, any bios we allocate and submit will not be
@@ -546,6 +555,8 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 		bio_init(bio, bdev, NULL, 0, opf);
 	}
 
+	if (use_alloc_cache)
+		bio_set_flag(bio, BIO_PERCPU_CACHE);
 	bio->bi_pool = bs;
 	return bio;
 
@@ -795,10 +806,7 @@ struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
 {
 	struct bio *bio;
 
-	if (bs->cache && bio_src->bi_opf & REQ_POLLED)
-		bio = bio_alloc_percpu_cache(bdev, 0, bio_src->bi_opf, gfp, bs);
-	else
-		bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
 	if (!bio)
 		return NULL;
 
@@ -1792,10 +1800,9 @@ EXPORT_SYMBOL(bioset_init_from_src);
 struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
 		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
 {
-	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
-		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-
-	return bio_alloc_percpu_cache(bdev, nr_vecs, opf, GFP_KERNEL, bs);
+	if (kiocb->ki_flags & IOCB_ALLOC_CACHE)
+		opf |= REQ_ALLOC_CACHE;
+	return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 }
 EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 709663ae757a..1be27e87a1f4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -783,6 +783,8 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
 static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 {
 	bio->bi_opf |= REQ_POLLED;
+	if (kiocb->ki_flags & IOCB_ALLOC_CACHE)
+		bio->bi_opf |= REQ_ALLOC_CACHE;
 	if (!is_sync_kiocb(kiocb))
 		bio->bi_opf |= REQ_NOWAIT;
 }
@@ -791,7 +793,7 @@ static inline void bio_clear_polled(struct bio *bio)
 {
 	/* can't support alloc cache if we turn off polling */
 	bio_clear_flag(bio, BIO_PERCPU_CACHE);
-	bio->bi_opf &= ~REQ_POLLED;
+	bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
 }
 
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5561e58d158a..5f9a0c39d4c5 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -327,7 +327,7 @@ enum {
 	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
 	BIO_REMAPPED,
 	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
-	BIO_PERCPU_CACHE,	/* can participate in per-cpu alloc cache */
+	BIO_PERCPU_CACHE,	/* participates in per-cpu alloc cache */
 	BIO_FLAG_LAST
 };
 
@@ -414,6 +414,7 @@ enum req_flag_bits {
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
 	__REQ_POLLED,		/* caller polls for completion using bio_poll */
+	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
 
 	/* for driver use */
 	__REQ_DRV,
@@ -439,6 +440,7 @@ enum req_flag_bits {
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
 #define REQ_POLLED		(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE		(1ULL << __REQ_ALLOC_CACHE)
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
 #define REQ_SWAP		(1ULL << __REQ_SWAP)
-- 
2.15.0


Thread overview: 16+ messages

2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 1/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 2/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset Mike Snitzer [this message]
2022-03-23 19:45 ` [PATCH v2 3/4] dm: enable BIOSET_PERCPU_CACHE for dm_io bioset Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 4/4] dm: conditionally enable BIOSET_PERCPU_CACHE for bio-based dm_io bioset Mike Snitzer
2022-03-24  0:25 ` [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Jens Axboe
2022-03-24  7:39 ` Christoph Hellwig
2022-03-24 14:41   ` Mike Snitzer
