linux-btrfs.vger.kernel.org archive mirror
* [PATCH 1/5] btrfs: scrub: use pointer array to replace @sblocks_for_recheck
       [not found] <cover.1646210538.git.wqu@suse.com>
@ 2022-03-02  8:44 ` Qu Wenruo
  2022-03-02  8:44 ` [PATCH 2/5] btrfs: extract the initialization of scrub_block into a helper function Qu Wenruo
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 5+ messages in thread
From: Qu Wenruo @ 2022-03-02  8:44 UTC (permalink / raw)
  To: linux-btrfs

In function scrub_handle_errored_block(), we use the @sblocks_for_recheck
pointer to hold one scrub_block for each mirror, and use kcalloc() to
allocate that array.

But having one pointer stand in for a whole array is not really
reader-friendly.

Just change this pointer to an array of pointers,
struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS]; this will
slightly increase the stack memory usage.
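
A minimal sketch of the resulting access-pattern change (this mirrors
the diff below, it is not extra code to apply):

	/* Before: one kcalloc()'ed array of scrub_block structures */
	struct scrub_block *sblocks_for_recheck;

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	/* mirror @i was addressed as: sblocks_for_recheck + i */

	/* After: an on-stack array of pointers, one kzalloc() per mirror */
	struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };

	for (i = 0; i < BTRFS_MAX_MIRRORS; i++)
		sblocks_for_recheck[i] = kzalloc(sizeof(struct scrub_block),
						 GFP_KERNEL);
	/* mirror @i is addressed as: sblocks_for_recheck[i] */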

Since scrub_handle_errored_block() is not called recursively, this extra
stack cost is completely acceptable.

And since we're here, also set sblock->refs and use scrub_block_put() to
clean them up, as later patches will add extra members to scrub_block
which also need scrub_block_put() for their cleanup.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 106 +++++++++++++++++++++++++----------------------
 1 file changed, 57 insertions(+), 49 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a644fb00b344..d40a8ea7b0b5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -205,7 +205,7 @@ struct full_stripe_lock {
 };
 
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
-				     struct scrub_block *sblocks_for_recheck);
+				     struct scrub_block *sblocks_for_recheck[]);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 				struct scrub_block *sblock,
 				int retry_failed_mirror);
@@ -813,7 +813,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	unsigned int failed_mirror_index;
 	unsigned int is_metadata;
 	unsigned int have_csum;
-	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
+	/* One scrub_block for each mirror */
+	struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
 	struct scrub_block *sblock_bad;
 	int ret;
 	int mirror_index;
@@ -905,17 +906,29 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 * repaired area is verified in order to correctly maintain
 	 * the statistics.
 	 */
-
-	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
-				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
-	if (!sblocks_for_recheck) {
-		spin_lock(&sctx->stat_lock);
-		sctx->stat.malloc_errors++;
-		sctx->stat.read_errors++;
-		sctx->stat.uncorrectable_errors++;
-		spin_unlock(&sctx->stat_lock);
-		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
-		goto out;
+	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
+		sblocks_for_recheck[mirror_index] =
+			kzalloc(sizeof(struct scrub_block), GFP_KERNEL);
+		if (!sblocks_for_recheck[mirror_index]) {
+			spin_lock(&sctx->stat_lock);
+			sctx->stat.malloc_errors++;
+			sctx->stat.read_errors++;
+			sctx->stat.uncorrectable_errors++;
+			spin_unlock(&sctx->stat_lock);
+			btrfs_dev_stat_inc_and_print(dev,
+						     BTRFS_DEV_STAT_READ_ERRS);
+			goto out;
+		}
+		/*
+		 * note: the two members refs and outstanding_sectors
+		 * are not used in the blocks that are used for the recheck
+		 * procedure.
+		 *
+		 * But to make the cleanup easier, we just put one ref for
+		 * each sblock.
+		 */
+		refcount_set(&sblocks_for_recheck[mirror_index]->refs, 1);
+		sblocks_for_recheck[mirror_index]->sctx = sctx;
 	}
 
 	/* setup the context, map the logical blocks and alloc the sectors */
@@ -929,7 +942,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		goto out;
 	}
 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
-	sblock_bad = sblocks_for_recheck + failed_mirror_index;
+	sblock_bad = sblocks_for_recheck[failed_mirror_index];
 
 	/* build and submit the bios for the failed mirror, check checksums */
 	scrub_recheck_block(fs_info, sblock_bad, 1);
@@ -1014,21 +1027,21 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		if (!scrub_is_page_on_raid56(sblock_bad->sectorv[0])) {
 			if (mirror_index >= BTRFS_MAX_MIRRORS)
 				break;
-			if (!sblocks_for_recheck[mirror_index].sector_count)
+			if (!sblocks_for_recheck[mirror_index]->sector_count)
 				break;
 
-			sblock_other = sblocks_for_recheck + mirror_index;
+			sblock_other = sblocks_for_recheck[mirror_index];
 		} else {
 			struct scrub_recover *r = sblock_bad->sectorv[0]->recover;
 			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
 
 			if (mirror_index >= max_allowed)
 				break;
-			if (!sblocks_for_recheck[1].sector_count)
+			if (!sblocks_for_recheck[1]->sector_count)
 				break;
 
 			ASSERT(failed_mirror_index == 0);
-			sblock_other = sblocks_for_recheck + 1;
+			sblock_other = sblocks_for_recheck[1];
 			sblock_other->sectorv[0]->mirror_num = 1 + mirror_index;
 		}
 
@@ -1100,12 +1113,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 			/* try to find no-io-error sector in mirrors */
 			for (mirror_index = 0;
 			     mirror_index < BTRFS_MAX_MIRRORS &&
-			     sblocks_for_recheck[mirror_index].sector_count > 0;
+			     sblocks_for_recheck[mirror_index]->sector_count > 0;
 			     mirror_index++) {
-				if (!sblocks_for_recheck[mirror_index].
+				if (!sblocks_for_recheck[mirror_index]->
 				    sectorv[sector_num]->io_error) {
-					sblock_other = sblocks_for_recheck +
-						       mirror_index;
+					sblock_other =
+						sblocks_for_recheck[mirror_index];
 					break;
 				}
 			}
@@ -1180,27 +1193,28 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	}
 
 out:
-	if (sblocks_for_recheck) {
-		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
-		     mirror_index++) {
-			struct scrub_block *sblock = sblocks_for_recheck +
-						     mirror_index;
-			struct scrub_recover *recover;
-			int sector_index;
-
-			for (sector_index = 0; sector_index < sblock->sector_count;
-			     sector_index++) {
-				sblock->sectorv[sector_index]->sblock = NULL;
-				recover = sblock->sectorv[sector_index]->recover;
-				if (recover) {
-					scrub_put_recover(fs_info, recover);
-					sblock->sectorv[sector_index]->recover =
-									NULL;
-				}
-				scrub_sector_put(sblock->sectorv[sector_index]);
+	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
+		struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
+		struct scrub_recover *recover;
+		int sector_index;
+
+		/* Not allocated, continue checking the next mirror */
+		if (!sblock)
+			continue;
+
+		for (sector_index = 0; sector_index < sblock->sector_count;
+		     sector_index++) {
+			/*
+			 * Here we just cleanup the recover, each sector will be
+			 * properly cleaned up by later scrub_block_put()
+			 */
+			recover = sblock->sectorv[sector_index]->recover;
+			if (recover) {
+				scrub_put_recover(fs_info, recover);
+				sblock->sectorv[sector_index]->recover = NULL;
 			}
 		}
-		kfree(sblocks_for_recheck);
+		scrub_block_put(sblock);
 	}
 
 	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
@@ -1251,7 +1265,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
 }
 
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
-				     struct scrub_block *sblocks_for_recheck)
+				     struct scrub_block *sblocks_for_recheck[])
 {
 	struct scrub_ctx *sctx = original_sblock->sctx;
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -1271,12 +1285,6 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 	int nmirrors;
 	int ret;
 
-	/*
-	 * note: the two members refs and outstanding_sectors
-	 * are not used (and not set) in the blocks that are used for
-	 * the recheck procedure
-	 */
-
 	while (length > 0) {
 		sublen = min_t(u64, length, fs_info->sectorsize);
 		mapped_length = sublen;
@@ -1315,7 +1323,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			struct scrub_block *sblock;
 			struct scrub_sector *ssector;
 
-			sblock = sblocks_for_recheck + mirror_index;
+			sblock = sblocks_for_recheck[mirror_index];
 			sblock->sctx = sctx;
 
 			ssector = kzalloc(sizeof(*ssector), GFP_NOFS);
-- 
2.35.1



* [PATCH 2/5] btrfs: extract the initialization of scrub_block into a helper function
       [not found] <cover.1646210538.git.wqu@suse.com>
  2022-03-02  8:44 ` [PATCH 1/5] btrfs: scrub: use pointer array to replace @sblocks_for_recheck Qu Wenruo
@ 2022-03-02  8:44 ` Qu Wenruo
  2022-03-02  8:44 ` [PATCH 3/5] btrfs: extract the allocation and initialization of scrub_sector into a helper Qu Wenruo
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 5+ messages in thread
From: Qu Wenruo @ 2022-03-02  8:44 UTC (permalink / raw)
  To: linux-btrfs

Although there are only two callers, we are going to add some members
to scrub_block in the incoming patches.

Extracting the initialization code will make later expansion easier.

One thing to note is, even though scrub_handle_errored_block() doesn't
utilize scrub_block::refs, we still use alloc_scrub_block() to initialize
sblock::refs, allowing us to use scrub_block_put() to do the cleanup.
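
As a rough illustration (the exact hunks are below), a call site goes
from open-coding the setup:

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock)
		return -ENOMEM;	/* stat accounting omitted in this sketch */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

to a single helper call:

	sblock = alloc_scrub_block(sctx);
	if (!sblock)
		return -ENOMEM;	/* stat accounting omitted in this sketch */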

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 49 ++++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 25 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index d40a8ea7b0b5..8ccd25666986 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -204,6 +204,19 @@ struct full_stripe_lock {
 	struct mutex mutex;
 };
 
+static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
+{
+	struct scrub_block *sblock;
+
+	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
+	if (!sblock)
+		return NULL;
+	refcount_set(&sblock->refs, 1);
+	sblock->sctx = sctx;
+	sblock->no_io_error_seen = 1;
+	return sblock;
+}
+
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				     struct scrub_block *sblocks_for_recheck[]);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
@@ -907,8 +920,15 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 * the statistics.
 	 */
 	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
-		sblocks_for_recheck[mirror_index] =
-			kzalloc(sizeof(struct scrub_block), GFP_KERNEL);
+		/*
+		 * Note: the two members refs and outstanding_sectors
+		 * are not used in the blocks that are used for the recheck
+		 * procedure.
+		 *
+		 * But alloc_scrub_block() will initialize sblock::ref anyway,
+		 * so we can use scrub_block_put() to clean them up.
+		 */
+		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx);
 		if (!sblocks_for_recheck[mirror_index]) {
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
@@ -919,16 +939,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 						     BTRFS_DEV_STAT_READ_ERRS);
 			goto out;
 		}
-		/*
-		 * note: the two members refs and outstanding_sectors
-		 * are not used in the blocks that are used for the recheck
-		 * procedure.
-		 *
-		 * But to make the cleanup easier, we just put one ref for
-		 * each sblock.
-		 */
-		refcount_set(&sblocks_for_recheck[mirror_index]->refs, 1);
-		sblocks_for_recheck[mirror_index]->sctx = sctx;
 	}
 
 	/* setup the context, map the logical blocks and alloc the sectors */
@@ -2266,7 +2276,7 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 	const u32 sectorsize = sctx->fs_info->sectorsize;
 	int index;
 
-	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
+	sblock = alloc_scrub_block(sctx);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2274,12 +2284,6 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 		return -ENOMEM;
 	}
 
-	/* one ref inside this function, plus one for each page added to
-	 * a bio later on */
-	refcount_set(&sblock->refs, 1);
-	sblock->sctx = sctx;
-	sblock->no_io_error_seen = 1;
-
 	for (index = 0; len > 0; index++) {
 		struct scrub_sector *ssector;
 		/*
@@ -2619,7 +2623,7 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 
 	ASSERT(IS_ALIGNED(len, sectorsize));
 
-	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
+	sblock = alloc_scrub_block(sctx);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2627,11 +2631,6 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 		return -ENOMEM;
 	}
 
-	/* one ref inside this function, plus one for each page added to
-	 * a bio later on */
-	refcount_set(&sblock->refs, 1);
-	sblock->sctx = sctx;
-	sblock->no_io_error_seen = 1;
 	sblock->sparity = sparity;
 	scrub_parity_get(sparity);
 
-- 
2.35.1



* [PATCH 3/5] btrfs: extract the allocation and initialization of scrub_sector into a helper
       [not found] <cover.1646210538.git.wqu@suse.com>
  2022-03-02  8:44 ` [PATCH 1/5] btrfs: scrub: use pointer array to replace @sblocks_for_recheck Qu Wenruo
  2022-03-02  8:44 ` [PATCH 2/5] btrfs: extract the initialization of scrub_block into a helper function Qu Wenruo
@ 2022-03-02  8:44 ` Qu Wenruo
  2022-03-02  8:44 ` [PATCH 4/5] btrfs: scrub: introduce scrub_block::pages for more efficient memory usage for subpage Qu Wenruo
  2022-03-02  8:44 ` [PATCH 5/5] btrfs: scrub: remove scrub_sector::page and use scrub_block::pages instead Qu Wenruo
  4 siblings, 0 replies; 5+ messages in thread
From: Qu Wenruo @ 2022-03-02  8:44 UTC (permalink / raw)
  To: linux-btrfs

The allocation and initialization are shared by 3 call sites, and we're
going to change the initialization of some members in the upcoming
patches.

So extract the allocation and initialization of scrub_sector into a
helper, alloc_scrub_sector(), which will do the following work (a short
before/after sketch of a call site follows the list):

- Allocate the memory for scrub_sector

- Allocate a page for scrub_sector::page

- Initialize scrub_sector::refs to 1

- Attach the allocated scrub_sector to scrub_block
  The attachment is bidirectional, which means scrub_block::sectorv[]
  will be updated and scrub_sector::sblock will also be updated.

- Update scrub_block::sector_count and do extra sanity check on it
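
A rough before/after sketch of one call site (see the hunks below for
the exact error handling and context; the "nomem" label here is just a
placeholder):

	/* Before: open-coded in each of the 3 call sites */
	ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
	if (!ssector)
		goto nomem;
	scrub_sector_get(ssector);
	sblock->sectorv[index] = ssector;
	ssector->sblock = sblock;
	/* ... fill in the per-sector members ... */
	sblock->sector_count++;
	ssector->page = alloc_page(GFP_KERNEL);
	if (!ssector->page)
		goto nomem;

	/* After: the helper does the allocation and the attachment */
	ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
	if (!ssector)
		goto nomem;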

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 61 ++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 31 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 8ccd25666986..bab717e80918 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -217,6 +217,33 @@ static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
 	return sblock;
 }
 
+/* Allocate a new scrub sector and attach it to @sblock */
+static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
+					       gfp_t gfp)
+{
+	struct scrub_sector *ssector;
+
+	ssector = kzalloc(sizeof(*ssector), gfp);
+	if (!ssector)
+		return NULL;
+	ssector->page = alloc_page(gfp);
+	if (!ssector->page) {
+		kfree(ssector);
+		return NULL;
+	}
+	atomic_set(&ssector->refs, 1);
+	ssector->sblock = sblock;
+	/* This sector to be added should not be used */
+	ASSERT(sblock->sectorv[sblock->sector_count] == NULL);
+	/* And the sector count should be smaller than the limit */
+	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
+
+	sblock->sectorv[sblock->sector_count] = ssector;
+	sblock->sector_count++;
+
+	return ssector;
+}
+
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				     struct scrub_block *sblocks_for_recheck[]);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
@@ -1336,18 +1363,14 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			sblock = sblocks_for_recheck[mirror_index];
 			sblock->sctx = sctx;
 
-			ssector = kzalloc(sizeof(*ssector), GFP_NOFS);
+			ssector = alloc_scrub_sector(sblock, GFP_NOFS);
 			if (!ssector) {
-leave_nomem:
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.malloc_errors++;
 				spin_unlock(&sctx->stat_lock);
 				scrub_put_recover(fs_info, recover);
 				return -ENOMEM;
 			}
-			scrub_sector_get(ssector);
-			sblock->sectorv[sector_index] = ssector;
-			ssector->sblock = sblock;
 			ssector->flags = flags;
 			ssector->generation = generation;
 			ssector->logical = logical;
@@ -1376,11 +1399,6 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				physical_for_dev_replace;
 			/* for missing devices, dev->bdev is NULL */
 			ssector->mirror_num = mirror_index + 1;
-			sblock->sector_count++;
-			ssector->page = alloc_page(GFP_NOFS);
-			if (!ssector->page)
-				goto leave_nomem;
-
 			scrub_get_recover(recover);
 			ssector->recover = recover;
 		}
@@ -2293,19 +2311,14 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 		 */
 		u32 l = min(sectorsize, len);
 
-		ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
+		ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
 		if (!ssector) {
-leave_nomem:
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
 			spin_unlock(&sctx->stat_lock);
 			scrub_block_put(sblock);
 			return -ENOMEM;
 		}
-		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
-		scrub_sector_get(ssector);
-		sblock->sectorv[index] = ssector;
-		ssector->sblock = sblock;
 		ssector->dev = dev;
 		ssector->flags = flags;
 		ssector->generation = gen;
@@ -2319,10 +2332,6 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 		} else {
 			ssector->have_csum = 0;
 		}
-		sblock->sector_count++;
-		ssector->page = alloc_page(GFP_KERNEL);
-		if (!ssector->page)
-			goto leave_nomem;
 		len -= l;
 		logical += l;
 		physical += l;
@@ -2637,23 +2646,18 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 	for (index = 0; len > 0; index++) {
 		struct scrub_sector *ssector;
 
-		ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
+		ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
 		if (!ssector) {
-leave_nomem:
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
 			spin_unlock(&sctx->stat_lock);
 			scrub_block_put(sblock);
 			return -ENOMEM;
 		}
-		ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
-		/* For scrub block */
-		scrub_sector_get(ssector);
 		sblock->sectorv[index] = ssector;
 		/* For scrub parity */
 		scrub_sector_get(ssector);
 		list_add_tail(&ssector->list, &sparity->ssectors);
-		ssector->sblock = sblock;
 		ssector->dev = dev;
 		ssector->flags = flags;
 		ssector->generation = gen;
@@ -2666,11 +2670,6 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 		} else {
 			ssector->have_csum = 0;
 		}
-		sblock->sector_count++;
-		ssector->page = alloc_page(GFP_KERNEL);
-		if (!ssector->page)
-			goto leave_nomem;
-
 
 		/* Iterate over the stripe range in sectorsize steps */
 		len -= sectorsize;
-- 
2.35.1



* [PATCH 4/5] btrfs: scrub: introduce scrub_block::pages for more efficient memory usage for subpage
       [not found] <cover.1646210538.git.wqu@suse.com>
                   ` (2 preceding siblings ...)
  2022-03-02  8:44 ` [PATCH 3/5] btrfs: extract the allocation and initialization of scrub_sector into a helper Qu Wenruo
@ 2022-03-02  8:44 ` Qu Wenruo
  2022-03-02  8:44 ` [PATCH 5/5] btrfs: scrub: remove scrub_sector::page and use scrub_block::pages instead Qu Wenruo
  4 siblings, 0 replies; 5+ messages in thread
From: Qu Wenruo @ 2022-03-02  8:44 UTC (permalink / raw)
  To: linux-btrfs

[BACKGROUND]
Currently for scrub we allocate one page for each sector. This is fine
for PAGE_SIZE == sectorsize support, but can waste extra memory for
subpage support.

[CODE CHANGE]
So this patch will make scrub_block contain all the pages, so if we're
scrubbing a 64K extent and our page size is also 64K, we only need to
allocate one page.
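
A rough sketch of the resulting layout (assuming 64K page size and 4K
sectorsize):

	scrub_block (logical = X, len = 64K)
	  pages[0]        -> one 64K page, page::private records X
	  sectorv[0..15]  -> 16 scrub_sectors, all backed by pages[0]

	/* A sector finds its backing page slot from its logical bytenr: */
	page_index = (logical - sblock->logical) >> PAGE_SHIFT;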

[LIFESPAN CHANGE]
Since scrub_sector will no longer hold a page of its own but use
scrub_block::pages[] instead, we have to ensure scrub_block lives long
enough for the write bio.

(The lifespan for the read bio is already long enough.)

Now scrub_block will only be released after the write bio has finished.
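
Concretely (matching the hunks below), each sector queued into the write
bio now also pins its parent block:

	/* When adding a sector to the write bio */
	scrub_sector_get(ssector);
	scrub_block_get(ssector->sblock);	/* keep scrub_block::pages alive */

	/* In the write bio end io worker */
	scrub_block_put(sbio->sectorv[i]->sblock);
	scrub_sector_put(sbio->sectorv[i]);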

[COMING NEXT]
Currently we only add scrub_block::pages[] for this purpose; scrub_sector
is still utilizing the old scrub_sector::page.

The switch will happen in the next patch.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 135 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 112 insertions(+), 23 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index bab717e80918..7edf063baee6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -54,6 +54,8 @@ struct scrub_ctx;
  */
 #define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
 
+#define SCRUB_MAX_PAGES			(DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))
+
 struct scrub_recover {
 	refcount_t		refs;
 	struct btrfs_io_context	*bioc;
@@ -94,8 +96,16 @@ struct scrub_bio {
 };
 
 struct scrub_block {
+	/*
+	 * Each page will have its page::private used to record the logical
+	 * bytenr.
+	 */
+	struct page		*pages[SCRUB_MAX_PAGES];
 	struct scrub_sector	*sectorv[SCRUB_MAX_SECTORS_PER_BLOCK];
+	u64			logical; /* Logical bytenr of the sblock */
+	u32			len; /* The length of sblock in bytes */
 	int			sector_count;
+
 	atomic_t		outstanding_sectors;
 	refcount_t		refs; /* free mem on transition to zero */
 	struct scrub_ctx	*sctx;
@@ -204,7 +214,46 @@ struct full_stripe_lock {
 	struct mutex mutex;
 };
 
-static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
+#ifndef CONFIG_64BIT
+/* This structure is for architectures whose (void *) is smaller than u64 */
+struct scrub_page_private {
+	u64 logical;
+};
+#endif
+
+static int attach_scrub_page_private(struct page *page, u64 logical)
+{
+#ifdef CONFIG_64BIT
+	attach_page_private(page, (void *)logical);
+	return 0;
+#else
+	struct scrub_page_private *spp;
+
+	spp = kmalloc(sizeof(*spp), GFP_KERNEL);
+	if (!spp)
+		return -ENOMEM;
+	spp->logical = logical;
+	attach_page_private(page, (void *)spp);
+	return 0;
+#endif
+}
+
+static void detach_scrub_page_private(struct page *page)
+{
+#ifdef CONFIG_64BIT
+	detach_page_private(page);
+	return;
+#else
+	struct scrub_page_private *spp;
+
+	spp = detach_page_private(page);
+	kfree(spp);
+	return;
+#endif
+}
+
+static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
+					     u64 logical)
 {
 	struct scrub_block *sblock;
 
@@ -213,27 +262,54 @@ static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
 		return NULL;
 	refcount_set(&sblock->refs, 1);
 	sblock->sctx = sctx;
+	sblock->logical = logical;
 	sblock->no_io_error_seen = 1;
+	/*
+	 * Scrub_block::pages will be allocated at alloc_scrub_sector() when
+	 * the corresponding page is not allocated.
+	 */
 	return sblock;
 }
 
-/* Allocate a new scrub sector and attach it to @sblock */
+/*
+ * Allocate a new scrub sector and attach it to @sblock.
+ *
+ * Will also allocate new pages for @sblock if needed.
+ */
 static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
-					       gfp_t gfp)
+					       u64 logical, gfp_t gfp)
 {
+	const int page_index = (logical - sblock->logical) >> PAGE_SHIFT;
 	struct scrub_sector *ssector;
 
 	ssector = kzalloc(sizeof(*ssector), gfp);
 	if (!ssector)
 		return NULL;
-	ssector->page = alloc_page(gfp);
-	if (!ssector->page) {
-		kfree(ssector);
-		return NULL;
+
+	/* Allocate a new page if the slot is not allocated */
+	if (!sblock->pages[page_index]) {
+		int ret;
+
+		sblock->pages[page_index] = alloc_page(gfp);
+		if (!sblock->pages[page_index]) {
+			kfree(ssector);
+			return NULL;
+		}
+		ret = attach_scrub_page_private(sblock->pages[page_index],
+				sblock->logical + (page_index << PAGE_SHIFT));
+		if (ret < 0) {
+			kfree(ssector);
+			__free_page(sblock->pages[page_index]);
+			sblock->pages[page_index] = NULL;
+			return NULL;
+		}
 	}
+
 	atomic_set(&ssector->refs, 1);
 	ssector->sblock = sblock;
-	/* This sector to be added should not be used */
+	ssector->logical = logical;
+
+	/* The slot to be added should not be used */
 	ASSERT(sblock->sectorv[sblock->sector_count] == NULL);
 	/* And the sector count should be smaller than the limit */
 	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
@@ -955,7 +1031,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		 * But alloc_scrub_block() will initialize sblock::ref anyway,
 		 * so we can use scrub_block_put() to clean them up.
 		 */
-		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx);
+		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx,
+								      logical);
 		if (!sblocks_for_recheck[mirror_index]) {
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
@@ -1363,7 +1440,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			sblock = sblocks_for_recheck[mirror_index];
 			sblock->sctx = sctx;
 
-			ssector = alloc_scrub_sector(sblock, GFP_NOFS);
+			ssector = alloc_scrub_sector(sblock, logical, GFP_NOFS);
 			if (!ssector) {
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.malloc_errors++;
@@ -1373,7 +1450,6 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			}
 			ssector->flags = flags;
 			ssector->generation = generation;
-			ssector->logical = logical;
 			ssector->have_csum = have_csum;
 			if (have_csum)
 				memcpy(ssector->csum,
@@ -1669,6 +1745,11 @@ static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
 	return ret;
 }
 
+static void scrub_block_get(struct scrub_block *sblock)
+{
+	refcount_inc(&sblock->refs);
+}
+
 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_sector *ssector)
 {
@@ -1736,6 +1817,13 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 
 	sbio->sectorv[sbio->sector_count] = ssector;
 	scrub_sector_get(ssector);
+	/*
+	 * Since ssector no longer holds a page, but uses sblock::pages, we
+	 * have to ensure the sblock didn't get freed before our write bio
+	 * finished.
+	 */
+	scrub_block_get(ssector->sblock);
+
 	sbio->sector_count++;
 	if (sbio->sector_count == sctx->sectors_per_bio)
 		scrub_wr_submit(sctx);
@@ -1797,8 +1885,10 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 		}
 	}
 
-	for (i = 0; i < sbio->sector_count; i++)
+	for (i = 0; i < sbio->sector_count; i++) {
+		scrub_block_put(sbio->sectorv[i]->sblock);
 		scrub_sector_put(sbio->sectorv[i]);
+	}
 
 	bio_put(sbio->bio);
 	kfree(sbio);
@@ -1989,11 +2079,6 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 	return fail_cor + fail_gen;
 }
 
-static void scrub_block_get(struct scrub_block *sblock)
-{
-	refcount_inc(&sblock->refs);
-}
-
 static void scrub_block_put(struct scrub_block *sblock)
 {
 	if (refcount_dec_and_test(&sblock->refs)) {
@@ -2004,6 +2089,12 @@ static void scrub_block_put(struct scrub_block *sblock)
 
 		for (i = 0; i < sblock->sector_count; i++)
 			scrub_sector_put(sblock->sectorv[i]);
+		for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
+			if (sblock->pages[i]) {
+				detach_scrub_page_private(sblock->pages[i]);
+				__free_page(sblock->pages[i]);
+			}
+		}
 		kfree(sblock);
 	}
 }
@@ -2294,7 +2385,7 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 	const u32 sectorsize = sctx->fs_info->sectorsize;
 	int index;
 
-	sblock = alloc_scrub_block(sctx);
+	sblock = alloc_scrub_block(sctx, logical);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2311,7 +2402,7 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 		 */
 		u32 l = min(sectorsize, len);
 
-		ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
+		ssector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
 		if (!ssector) {
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
@@ -2322,7 +2413,6 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 		ssector->dev = dev;
 		ssector->flags = flags;
 		ssector->generation = gen;
-		ssector->logical = logical;
 		ssector->physical = physical;
 		ssector->physical_for_dev_replace = physical_for_dev_replace;
 		ssector->mirror_num = mirror_num;
@@ -2632,7 +2722,7 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 
 	ASSERT(IS_ALIGNED(len, sectorsize));
 
-	sblock = alloc_scrub_block(sctx);
+	sblock = alloc_scrub_block(sctx, logical);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2646,7 +2736,7 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 	for (index = 0; len > 0; index++) {
 		struct scrub_sector *ssector;
 
-		ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
+		ssector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
 		if (!ssector) {
 			spin_lock(&sctx->stat_lock);
 			sctx->stat.malloc_errors++;
@@ -2661,7 +2751,6 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
 		ssector->dev = dev;
 		ssector->flags = flags;
 		ssector->generation = gen;
-		ssector->logical = logical;
 		ssector->physical = physical;
 		ssector->mirror_num = mirror_num;
 		if (csum) {
-- 
2.35.1



* [PATCH 5/5] btrfs: scrub: remove scrub_sector::page and use scrub_block::pages instead
       [not found] <cover.1646210538.git.wqu@suse.com>
                   ` (3 preceding siblings ...)
  2022-03-02  8:44 ` [PATCH 4/5] btrfs: scrub: introduce scrub_block::pages for more efficient memory usage for subpage Qu Wenruo
@ 2022-03-02  8:44 ` Qu Wenruo
  4 siblings, 0 replies; 5+ messages in thread
From: Qu Wenruo @ 2022-03-02  8:44 UTC (permalink / raw)
  To: linux-btrfs

Although scrub currently works for subpage (PAGE_SIZE > sectorsize) cases,
it will allocate one page for each scrub_sector, which causes unnecessary
extra memory usage.

This patch will utilize scrub_block::pages[] instead of allocating a page
for each scrub_sector; this allows us to handle larger extents while using
less memory.

For example, if our page size is 64K, sectorsize is 4K, and we get a
32K sized extent, we will only allocate one page for the scrub_block,
and all 8 scrub_sectors will point into that page.

To do that properly, here we introduce several small helpers (a short
usage sketch follows the list):

- scrub_page_get_logical()
  Get the logical bytenr of a page.
  We store the logical bytenr of the page range into page::private.
  But for 32bit systems, their (void *) is not large enough to contain
  a u64, so in that case we will need to allocate extra memory for it.

  For 64bit systems, we can use page::private directly.

- scrub_block_get_logical()
  Just get the logical bytenr of the first page.

- scrub_sector_get_page()
  Return the page which the scrub_sector points to.

- scrub_sector_get_page_offset()
  Return the offset inside the page which the scrub_sector points to.

- scrub_sector_get_kaddr()
  Return the address which the scrub_sector points to.
  Just a wrapper using scrub_sector_get_page() and
  scrub_sector_get_page_offset()

- bio_add_scrub_sector()
  Add the sector's backing range into a bio; a wrapper around
  bio_add_page() using the above two helpers.
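
A rough sketch of how a caller changes with these helpers (sectorsize is
used as the length, as in the hunks below):

	/* Before: every sector carried its own full page */
	bio_add_page(bio, ssector->page, fs_info->sectorsize, 0);

	/* After: the sector resolves its backing page and in-page offset */
	bio_add_scrub_sector(bio, ssector, fs_info->sectorsize);

	/* where bio_add_scrub_sector() boils down to:
	 *   bio_add_page(bio, scrub_sector_get_page(ssector), len,
	 *		  scrub_sector_get_page_offset(ssector));
	 */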

Please note that, even with this patch, we're still allocating one page
for one sector for data extents.

This is because in scrub_extent() we split the data extent using
sectorsize.

Reducing that memory usage will need extra work, to make scrub behave
like the data read path and only use the correct sector(s).

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 100 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 73 insertions(+), 27 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 7edf063baee6..02efb0ce7d43 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -64,7 +64,6 @@ struct scrub_recover {
 
 struct scrub_sector {
 	struct scrub_block	*sblock;
-	struct page		*page;
 	struct btrfs_device	*dev;
 	struct list_head	list;
 	u64			flags;  /* extent flags */
@@ -311,15 +310,65 @@ static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
 
 	/* The slot to be added should not be used */
 	ASSERT(sblock->sectorv[sblock->sector_count] == NULL);
+
 	/* And the sector count should be smaller than the limit */
 	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
 
 	sblock->sectorv[sblock->sector_count] = ssector;
 	sblock->sector_count++;
+	sblock->len += sblock->sctx->fs_info->sectorsize;
 
 	return ssector;
 }
 
+static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
+{
+	struct scrub_block *sblock = ssector->sblock;
+	int index;
+	/*
+	 * When calling this function, ssector should have been attached to
+	 * the parent sblock.
+	 */
+	ASSERT(sblock);
+
+	/* The range should be inside the sblock range */
+	ASSERT(ssector->logical - sblock->logical < sblock->len);
+
+	index = (ssector->logical - sblock->logical) >> PAGE_SHIFT;
+	ASSERT(index < SCRUB_MAX_PAGES);
+	ASSERT(sblock->pages[index]);
+	ASSERT(PagePrivate(sblock->pages[index]));
+	return sblock->pages[index];
+}
+
+static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
+{
+	struct scrub_block *sblock = ssector->sblock;
+	/*
+	 * When calling this function, ssector should have been attached to
+	 * the parent sblock.
+	 */
+	ASSERT(sblock);
+
+	/* The range should be inside the sblock range */
+	ASSERT(ssector->logical - sblock->logical < sblock->len);
+
+	return offset_in_page(ssector->logical - sblock->logical);
+}
+
+static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
+{
+	return page_address(scrub_sector_get_page(ssector)) +
+	       scrub_sector_get_page_offset(ssector);
+}
+
+static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
+				 unsigned int len)
+{
+	return bio_add_page(bio, scrub_sector_get_page(ssector), len,
+			   scrub_sector_get_page_offset(ssector));
+}
+
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				     struct scrub_block *sblocks_for_recheck[]);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
@@ -652,7 +701,6 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
 
 		for (i = 0; i < sbio->sector_count; i++) {
-			WARN_ON(!sbio->sectorv[i]->page);
 			scrub_block_put(sbio->sectorv[i]->sblock);
 		}
 		bio_put(sbio->bio);
@@ -1533,8 +1581,7 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
 	for (sector_num = 0; sector_num < sblock->sector_count; sector_num++) {
 		struct scrub_sector *ssector = sblock->sectorv[sector_num];
 
-		WARN_ON(!ssector->page);
-		bio_add_page(bio, ssector->page, PAGE_SIZE, 0);
+		bio_add_scrub_sector(bio, ssector, fs_info->sectorsize);
 	}
 
 	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
@@ -1583,11 +1630,10 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 			continue;
 		}
 
-		WARN_ON(!ssector->page);
 		bio = btrfs_bio_alloc(1);
 		bio_set_dev(bio, ssector->dev->bdev);
 
-		bio_add_page(bio, ssector->page, fs_info->sectorsize, 0);
+		bio_add_scrub_sector(bio, ssector, fs_info->sectorsize);
 		bio->bi_iter.bi_sector = ssector->physical >> 9;
 		bio->bi_opf = REQ_OP_READ;
 
@@ -1653,8 +1699,6 @@ static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
 	const u32 sectorsize = fs_info->sectorsize;
 
-	BUG_ON(ssector_bad->page == NULL);
-	BUG_ON(ssector_good->page == NULL);
 	if (force_write || sblock_bad->header_error ||
 	    sblock_bad->checksum_error || ssector_bad->io_error) {
 		struct bio *bio;
@@ -1671,7 +1715,7 @@ static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
 		bio->bi_iter.bi_sector = ssector_bad->physical >> 9;
 		bio->bi_opf = REQ_OP_WRITE;
 
-		ret = bio_add_page(bio, ssector_good->page, sectorsize, 0);
+		ret = bio_add_scrub_sector(bio, ssector_good, sectorsize);
 		if (ret != sectorsize) {
 			bio_put(bio);
 			return -EIO;
@@ -1714,11 +1758,16 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
 					     int sector_num)
 {
+	const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
 	struct scrub_sector *ssector = sblock->sectorv[sector_num];
 
-	BUG_ON(ssector->page == NULL);
-	if (ssector->io_error)
-		clear_page(page_address(ssector->page));
+	if (ssector->io_error) {
+		/* We can only clear the full page if one sector is one page */
+		if (sectorsize == PAGE_SIZE)
+			clear_page(page_address(scrub_sector_get_page(ssector)));
+		else
+			memset(scrub_sector_get_kaddr(ssector), 0, sectorsize);
+	}
 
 	return scrub_add_sector_to_wr_bio(sblock->sctx, ssector);
 }
@@ -1803,7 +1852,7 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 		goto again;
 	}
 
-	ret = bio_add_page(sbio->bio, ssector->page, sectorsize, 0);
+	ret = bio_add_scrub_sector(sbio->bio, ssector, sectorsize);
 	if (ret != sectorsize) {
 		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
@@ -1943,15 +1992,11 @@ static int scrub_checksum_data(struct scrub_block *sblock)
 	if (!ssector->have_csum)
 		return 0;
 
-	kaddr = page_address(ssector->page);
+	kaddr = scrub_sector_get_kaddr(ssector);
 
 	shash->tfm = fs_info->csum_shash;
 	crypto_shash_init(shash);
 
-	/*
-	 * In scrub_sectors() and scrub_sectors_for_parity() we ensure each ssector
-	 * only contains one sector of data.
-	 */
 	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
 
 	if (memcmp(csum, ssector->csum, fs_info->csum_size))
@@ -1984,7 +2029,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 	ASSERT(sblock->sector_count == num_sectors);
 
 	ssector = sblock->sectorv[0];
-	kaddr = page_address(ssector->page);
+	kaddr = scrub_sector_get_kaddr(ssector);
 	h = (struct btrfs_header *)kaddr;
 	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
 
@@ -2014,7 +2059,8 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 			    sectorsize - BTRFS_CSUM_SIZE);
 
 	for (i = 1; i < num_sectors; i++) {
-		kaddr = page_address(sblock->sectorv[i]->page);
+		ssector = sblock->sectorv[i];
+		kaddr = scrub_sector_get_kaddr(ssector);
 		crypto_shash_update(shash, kaddr, sectorsize);
 	}
 
@@ -2039,7 +2085,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
 	BUG_ON(sblock->sector_count < 1);
 	ssector = sblock->sectorv[0];
-	kaddr = page_address(ssector->page);
+	kaddr = scrub_sector_get_kaddr(ssector);
 	s = (struct btrfs_super_block *)kaddr;
 
 	if (ssector->logical != btrfs_super_bytenr(s))
@@ -2106,11 +2152,8 @@ static void scrub_sector_get(struct scrub_sector *ssector)
 
 static void scrub_sector_put(struct scrub_sector *ssector)
 {
-	if (atomic_dec_and_test(&ssector->refs)) {
-		if (ssector->page)
-			__free_page(ssector->page);
+	if (atomic_dec_and_test(&ssector->refs))
 		kfree(ssector);
-	}
 }
 
 /*
@@ -2241,7 +2284,7 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 	}
 
 	sbio->sectorv[sbio->sector_count] = ssector;
-	ret = bio_add_page(sbio->bio, ssector->page, sectorsize, 0);
+	ret = bio_add_scrub_sector(sbio->bio, ssector, sectorsize);
 	if (ret != sectorsize) {
 		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
@@ -2357,7 +2400,10 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 	for (i = 0; i < sblock->sector_count; i++) {
 		struct scrub_sector *ssector = sblock->sectorv[i];
 
-		raid56_add_scrub_pages(rbio, ssector->page, ssector->logical);
+		/* Subpage doesn't support RAID56 yet */
+		ASSERT(fs_info->sectorsize == PAGE_SIZE);
+		raid56_add_scrub_pages(rbio, scrub_sector_get_page(ssector),
+				       ssector->logical);
 	}
 
 	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
-- 
2.35.1



