All of lore.kernel.org
 help / color / mirror / Atom feed
From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Subject: [PATCH 06/14] btrfs: scrub: make scrub_ctx::stripes dynamically allocated
Date: Mon,  3 Jul 2023 15:32:30 +0800	[thread overview]
Message-ID: <e3636085c44cec6e167df77a000d3cd24c2fe678.1688368617.git.wqu@suse.com> (raw)
In-Reply-To: <cover.1688368617.git.wqu@suse.com>

Currently scrub_ctx::stripes is a fixed-size array; this is fine for
most use cases, but later we may want to allocate one larger array
for logical-bytenr-based scrub.

So here we change the member to a dynamically allocated array.

This also affects the lifespan of the member.
Now it only needs to be allocated and initialized at the beginning of
the scrub_stripe() function.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 67 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 51 insertions(+), 16 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 41c514db0793..1e49bb066619 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -172,7 +172,7 @@ struct scrub_stripe {
 };
 
 struct scrub_ctx {
-	struct scrub_stripe	stripes[SCRUB_STRIPES_PER_SCTX];
+	struct scrub_stripe	*stripes;
 	struct scrub_stripe	*raid56_data_stripes;
 	struct btrfs_fs_info	*fs_info;
 	int			first_free;
@@ -181,6 +181,9 @@ struct scrub_ctx {
 	int			readonly;
 	int			sectors_per_bio;
 
+	/* Number of stripes we have in @stripes. */
+	unsigned int		nr_stripes;
+
 	/* State of IO submission throttling affecting the associated device */
 	ktime_t			throttle_deadline;
 	u64			throttle_sent;
@@ -308,16 +311,24 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 	scrub_pause_off(fs_info);
 }
 
+static void free_scrub_stripes(struct scrub_ctx *sctx)
+{
+	if (!sctx->stripes)
+		return;
+
+	for (int i = 0; i < sctx->nr_stripes; i++)
+		release_scrub_stripe(&sctx->stripes[i]);
+	kfree(sctx->stripes);
+	sctx->nr_stripes = 0;
+	sctx->stripes = NULL;
+}
+
 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 {
-	int i;
-
 	if (!sctx)
 		return;
 
-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
-		release_scrub_stripe(&sctx->stripes[i]);
-
+	free_scrub_stripes(sctx);
 	kfree(sctx);
 }
 
@@ -331,7 +342,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 		struct btrfs_fs_info *fs_info, int is_dev_replace)
 {
 	struct scrub_ctx *sctx;
-	int		i;
 
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
@@ -339,14 +349,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 	refcount_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->fs_info = fs_info;
-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
-		int ret;
-
-		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
-		if (ret < 0)
-			goto nomem;
-		sctx->stripes[i].sctx = sctx;
-	}
 	sctx->first_free = 0;
 	atomic_set(&sctx->cancel_req, 0);
 
@@ -1659,6 +1661,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
 	const int nr_stripes = sctx->cur_stripe;
 	int ret = 0;
 
+	ASSERT(nr_stripes <= sctx->nr_stripes);
 	if (!nr_stripes)
 		return 0;
 
@@ -1753,8 +1756,11 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
 	struct scrub_stripe *stripe;
 	int ret;
 
+	ASSERT(sctx->stripes);
+	ASSERT(sctx->nr_stripes);
+
 	/* No available slot, submit all stripes and wait for them. */
-	if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
+	if (sctx->cur_stripe >= sctx->nr_stripes) {
 		ret = flush_scrub_stripes(sctx);
 		if (ret < 0)
 			return ret;
@@ -2076,6 +2082,30 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
 	return ret;
 }
 
+static int alloc_scrub_stripes(struct scrub_ctx *sctx, int nr_stripes)
+{
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
+	int ret;
+
+	ASSERT(!sctx->stripes);
+	ASSERT(!sctx->nr_stripes);
+	sctx->stripes = kcalloc(nr_stripes, sizeof(struct scrub_stripe),
+				GFP_KERNEL);
+	if (!sctx->stripes)
+		return -ENOMEM;
+	sctx->nr_stripes = nr_stripes;
+	for (int i = 0; i < sctx->nr_stripes; i++) {
+		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
+		if (ret < 0)
+			goto cleanup;
+		sctx->stripes[i].sctx = sctx;
+	}
+	return 0;
+cleanup:
+	free_scrub_stripes(sctx);
+	return -ENOMEM;
+}
+
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct btrfs_block_group *bg,
 					   struct extent_map *em,
@@ -2102,6 +2132,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
 	scrub_blocked_if_needed(fs_info);
 
+	ret = alloc_scrub_stripes(sctx, SCRUB_STRIPES_PER_SCTX);
+	if (ret < 0)
+		return ret;
+
 	if (sctx->is_dev_replace &&
 	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
 		mutex_lock(&sctx->wr_lock);
@@ -2224,6 +2258,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		kfree(sctx->raid56_data_stripes);
 		sctx->raid56_data_stripes = NULL;
 	}
+	free_scrub_stripes(sctx);
 
 	if (sctx->is_dev_replace && ret >= 0) {
 		int ret2;
-- 
2.41.0


  parent reply	other threads:[~2023-07-03  7:33 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-07-03  7:32 [PATCH 00/14] btrfs: scrub: introduce SCRUB_LOGICAL flag Qu Wenruo
2023-07-03  7:32 ` [PATCH 01/14] btrfs: raid56: remove unnecessary parameter for raid56_parity_alloc_scrub_rbio() Qu Wenruo
2023-07-03 12:33   ` Anand Jain
2023-07-12 16:23   ` Christoph Hellwig
2023-07-03  7:32 ` [PATCH 02/14] btrfs: raid56: allow scrub operation to update both P and Q stripes Qu Wenruo
2023-07-12 16:24   ` Christoph Hellwig
2023-07-03  7:32 ` [PATCH 03/14] btrfs: raid56: allow caching P/Q stripes Qu Wenruo
2023-07-12 16:27   ` Christoph Hellwig
2023-07-03  7:32 ` [PATCH 04/14] btrfs: raid56: add the interfaces to submit recovery rbio Qu Wenruo
2023-07-03  7:32 ` [PATCH 05/14] btrfs: add the ability to read P/Q stripes directly Qu Wenruo
2023-07-03  9:46   ` Qu Wenruo
2023-07-03  7:32 ` Qu Wenruo [this message]
2023-07-03  7:32 ` [PATCH 07/14] btrfs: scrub: introduce the skeleton for logical-scrub Qu Wenruo
2023-07-03  7:32 ` [PATCH 08/14] btrfs: scrub: extract the common preparation before scrubbing a block group Qu Wenruo
2023-07-03  7:32 ` [PATCH 09/14] btrfs: scrub: implement the chunk iteration code for scrub_logical Qu Wenruo
2023-07-03  7:32 ` [PATCH 10/14] btrfs: scrub: implement the basic extent iteration code Qu Wenruo
2023-07-03  7:32 ` [PATCH 11/14] btrfs: scrub: implement the repair part of scrub logical Qu Wenruo
2023-07-03  7:32 ` [PATCH 12/14] btrfs: scrub: add RAID56 support for queue_scrub_logical_stripes() Qu Wenruo
2023-07-03  7:32 ` [PATCH 13/14] btrfs: scrub: introduce the RAID56 data recovery path for scrub logical Qu Wenruo
2023-07-03  7:32 ` [PATCH 14/14] btrfs: scrub: implement the RAID56 P/Q scrub code Qu Wenruo
2023-07-03 12:58 ` [PATCH 00/14] btrfs: scrub: introduce SCRUB_LOGICAL flag Graham Cobb
2023-07-03 22:40   ` Qu Wenruo
2023-07-03 23:19     ` Graham Cobb
2023-07-13 12:14 ` David Sterba

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=e3636085c44cec6e167df77a000d3cd24c2fe678.1688368617.git.wqu@suse.com \
    --to=wqu@suse.com \
    --cc=johannes.thumshirn@wdc.com \
    --cc=linux-btrfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.