From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH 25/32] btrfs: scrub: distinguish scrub_page from regular page
Date: Tue,  3 Nov 2020 21:31:01 +0800
Message-ID: <20201103133108.148112-26-wqu@suse.com>
In-Reply-To: <20201103133108.148112-1-wqu@suse.com>

There are several call sites where we declare something like
"struct scrub_page *page".

This is just asking for trouble when reading the code, as we also have
the scrub_page::page member.

To avoid confusion, use "spage" for scrub_page structure pointers.
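
For illustration only (not part of the diff below), a minimal sketch of
the naming clash; the member list is abbreviated from fs/btrfs/scrub.c
and the example_* helpers are purely hypothetical:

    /* Simplified sketch, members abbreviated */
    struct scrub_page {
            struct page *page;      /* backing memory page */
            /* ... sblock, recover, logical, physical, csum, ... */
    };

    /* Before: the parameter name shadows the member name */
    static void example_before(struct scrub_page *page)
    {
            struct page *backing = page->page;  /* which "page" is which? */
    }

    /* After: "spage" always refers to the scrub_page wrapper */
    static void example_after(struct scrub_page *spage)
    {
            struct page *backing = spage->page;
    }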

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 102 +++++++++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 51 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 58cd3278fbfe..42d1d5258e83 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -255,10 +255,10 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_put_ctx(struct scrub_ctx *sctx);
 
-static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
 {
-	return page->recover &&
-	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+	return spage->recover &&
+	       (spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 }
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -1090,11 +1090,11 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	success = 1;
 	for (page_num = 0; page_num < sblock_bad->page_count;
 	     page_num++) {
-		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
+		struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
 		struct scrub_block *sblock_other = NULL;
 
 		/* skip no-io-error page in scrub */
-		if (!page_bad->io_error && !sctx->is_dev_replace)
+		if (!spage_bad->io_error && !sctx->is_dev_replace)
 			continue;
 
 		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
@@ -1106,7 +1106,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 			 * sblock_for_recheck array to target device.
 			 */
 			sblock_other = NULL;
-		} else if (page_bad->io_error) {
+		} else if (spage_bad->io_error) {
 			/* try to find no-io-error page in mirrors */
 			for (mirror_index = 0;
 			     mirror_index < BTRFS_MAX_MIRRORS &&
@@ -1145,7 +1145,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 							       sblock_other,
 							       page_num, 0);
 			if (0 == ret)
-				page_bad->io_error = 0;
+				spage_bad->io_error = 0;
 			else
 				success = 0;
 		}
@@ -1323,13 +1323,13 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 		for (mirror_index = 0; mirror_index < nmirrors;
 		     mirror_index++) {
 			struct scrub_block *sblock;
-			struct scrub_page *page;
+			struct scrub_page *spage;
 
 			sblock = sblocks_for_recheck + mirror_index;
 			sblock->sctx = sctx;
 
-			page = kzalloc(sizeof(*page), GFP_NOFS);
-			if (!page) {
+			spage = kzalloc(sizeof(*spage), GFP_NOFS);
+			if (!spage) {
 leave_nomem:
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.malloc_errors++;
@@ -1337,15 +1337,15 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				scrub_put_recover(fs_info, recover);
 				return -ENOMEM;
 			}
-			scrub_page_get(page);
-			sblock->pagev[page_index] = page;
-			page->sblock = sblock;
-			page->flags = flags;
-			page->generation = generation;
-			page->logical = logical;
-			page->have_csum = have_csum;
+			scrub_page_get(spage);
+			sblock->pagev[page_index] = spage;
+			spage->sblock = sblock;
+			spage->flags = flags;
+			spage->generation = generation;
+			spage->logical = logical;
+			spage->have_csum = have_csum;
 			if (have_csum)
-				memcpy(page->csum,
+				memcpy(spage->csum,
 				       original_sblock->pagev[0]->csum,
 				       sctx->fs_info->csum_size);
 
@@ -1358,23 +1358,23 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 						      mirror_index,
 						      &stripe_index,
 						      &stripe_offset);
-			page->physical = bbio->stripes[stripe_index].physical +
+			spage->physical = bbio->stripes[stripe_index].physical +
 					 stripe_offset;
-			page->dev = bbio->stripes[stripe_index].dev;
+			spage->dev = bbio->stripes[stripe_index].dev;
 
 			BUG_ON(page_index >= original_sblock->page_count);
-			page->physical_for_dev_replace =
+			spage->physical_for_dev_replace =
 				original_sblock->pagev[page_index]->
 				physical_for_dev_replace;
 			/* for missing devices, dev->bdev is NULL */
-			page->mirror_num = mirror_index + 1;
+			spage->mirror_num = mirror_index + 1;
 			sblock->page_count++;
-			page->page = alloc_page(GFP_NOFS);
-			if (!page->page)
+			spage->page = alloc_page(GFP_NOFS);
+			if (!spage->page)
 				goto leave_nomem;
 
 			scrub_get_recover(recover);
-			page->recover = recover;
+			spage->recover = recover;
 		}
 		scrub_put_recover(fs_info, recover);
 		length -= sublen;
@@ -1392,19 +1392,19 @@ static void scrub_bio_wait_endio(struct bio *bio)
 
 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 					struct bio *bio,
-					struct scrub_page *page)
+					struct scrub_page *spage)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	int ret;
 	int mirror_num;
 
-	bio->bi_iter.bi_sector = page->logical >> 9;
+	bio->bi_iter.bi_sector = spage->logical >> 9;
 	bio->bi_private = &done;
 	bio->bi_end_io = scrub_bio_wait_endio;
 
-	mirror_num = page->sblock->pagev[0]->mirror_num;
-	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
-				    page->recover->map_length,
+	mirror_num = spage->sblock->pagev[0]->mirror_num;
+	ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
+				    spage->recover->map_length,
 				    mirror_num, 0);
 	if (ret)
 		return ret;
@@ -1429,10 +1429,10 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
 	bio_set_dev(bio, first_page->dev->bdev);
 
 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
-		struct scrub_page *page = sblock->pagev[page_num];
+		struct scrub_page *spage = sblock->pagev[page_num];
 
-		WARN_ON(!page->page);
-		bio_add_page(bio, page->page, PAGE_SIZE, 0);
+		WARN_ON(!spage->page);
+		bio_add_page(bio, spage->page, PAGE_SIZE, 0);
 	}
 
 	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
@@ -1473,24 +1473,24 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 
 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
 		struct bio *bio;
-		struct scrub_page *page = sblock->pagev[page_num];
+		struct scrub_page *spage = sblock->pagev[page_num];
 
-		if (page->dev->bdev == NULL) {
-			page->io_error = 1;
+		if (spage->dev->bdev == NULL) {
+			spage->io_error = 1;
 			sblock->no_io_error_seen = 0;
 			continue;
 		}
 
-		WARN_ON(!page->page);
+		WARN_ON(!spage->page);
 		bio = btrfs_io_bio_alloc(1);
-		bio_set_dev(bio, page->dev->bdev);
+		bio_set_dev(bio, spage->dev->bdev);
 
-		bio_add_page(bio, page->page, PAGE_SIZE, 0);
-		bio->bi_iter.bi_sector = page->physical >> 9;
+		bio_add_page(bio, spage->page, PAGE_SIZE, 0);
+		bio->bi_iter.bi_sector = spage->physical >> 9;
 		bio->bi_opf = REQ_OP_READ;
 
 		if (btrfsic_submit_bio_wait(bio)) {
-			page->io_error = 1;
+			spage->io_error = 1;
 			sblock->no_io_error_seen = 0;
 		}
 
@@ -1546,36 +1546,36 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 					    struct scrub_block *sblock_good,
 					    int page_num, int force_write)
 {
-	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
-	struct scrub_page *page_good = sblock_good->pagev[page_num];
+	struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
+	struct scrub_page *spage_good = sblock_good->pagev[page_num];
 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
 
-	BUG_ON(page_bad->page == NULL);
-	BUG_ON(page_good->page == NULL);
+	BUG_ON(spage_bad->page == NULL);
+	BUG_ON(spage_good->page == NULL);
 	if (force_write || sblock_bad->header_error ||
-	    sblock_bad->checksum_error || page_bad->io_error) {
+	    sblock_bad->checksum_error || spage_bad->io_error) {
 		struct bio *bio;
 		int ret;
 
-		if (!page_bad->dev->bdev) {
+		if (!spage_bad->dev->bdev) {
 			btrfs_warn_rl(fs_info,
 				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
 			return -EIO;
 		}
 
 		bio = btrfs_io_bio_alloc(1);
-		bio_set_dev(bio, page_bad->dev->bdev);
-		bio->bi_iter.bi_sector = page_bad->physical >> 9;
+		bio_set_dev(bio, spage_bad->dev->bdev);
+		bio->bi_iter.bi_sector = spage_bad->physical >> 9;
 		bio->bi_opf = REQ_OP_WRITE;
 
-		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
+		ret = bio_add_page(bio, spage_good->page, PAGE_SIZE, 0);
 		if (PAGE_SIZE != ret) {
 			bio_put(bio);
 			return -EIO;
 		}
 
 		if (btrfsic_submit_bio_wait(bio)) {
-			btrfs_dev_stat_inc_and_print(page_bad->dev,
+			btrfs_dev_stat_inc_and_print(spage_bad->dev,
 				BTRFS_DEV_STAT_WRITE_ERRS);
 			atomic64_inc(&fs_info->dev_replace.num_write_errors);
 			bio_put(bio);
-- 
2.29.2



Thread overview: 98+ messages
2020-11-03 13:30 [PATCH 00/32] btrfs: preparation patches for subpage support Qu Wenruo
2020-11-03 13:30 ` [PATCH 01/32] btrfs: extent_io: remove the extent_start/extent_len for end_bio_extent_readpage() Qu Wenruo
2020-11-05  9:46   ` Nikolay Borisov
2020-11-05 10:15     ` Qu Wenruo
2020-11-05 10:32       ` Nikolay Borisov
2020-11-06  2:01         ` Qu Wenruo
2020-11-06  7:19           ` Qu Wenruo
2020-11-05 19:40   ` Josef Bacik
2020-11-06  1:52     ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 02/32] btrfs: extent_io: integrate page status update into endio_readpage_release_extent() Qu Wenruo
2020-11-05 10:26   ` Nikolay Borisov
2020-11-05 11:15     ` Qu Wenruo
2020-11-05 10:35   ` Nikolay Borisov
2020-11-05 11:25     ` Qu Wenruo
2020-11-05 19:34   ` Josef Bacik
2020-11-03 13:30 ` [PATCH 03/32] btrfs: extent_io: add lockdep_assert_held() for attach_extent_buffer_page() Qu Wenruo
2020-11-03 13:30 ` [PATCH 04/32] btrfs: extent_io: extract the btree page submission code into its own helper function Qu Wenruo
2020-11-05 10:47   ` Nikolay Borisov
2020-11-06 18:11     ` David Sterba
2020-11-03 13:30 ` [PATCH 05/32] btrfs: extent-io-tests: remove invalid tests Qu Wenruo
2020-11-03 13:30 ` [PATCH 06/32] btrfs: extent_io: calculate inline extent buffer page size based on page size Qu Wenruo
2020-11-05 12:54   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 07/32] btrfs: extent_io: make btrfs_fs_info::buffer_radix to take sector size devided values Qu Wenruo
2020-11-03 13:30 ` [PATCH 08/32] btrfs: extent_io: sink less common parameters for __set_extent_bit() Qu Wenruo
2020-11-05 13:35   ` Nikolay Borisov
2020-11-05 13:55     ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 09/32] btrfs: extent_io: sink less common parameters for __clear_extent_bit() Qu Wenruo
2020-11-03 13:30 ` [PATCH 10/32] btrfs: disk_io: grab fs_info from extent_buffer::fs_info directly for btrfs_mark_buffer_dirty() Qu Wenruo
2020-11-05 13:45   ` Nikolay Borisov
2020-11-05 13:49   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 11/32] btrfs: disk-io: make csum_tree_block() handle sectorsize smaller than page size Qu Wenruo
2020-11-06 18:58   ` David Sterba
2020-11-07  0:04     ` Qu Wenruo
2020-11-10 14:33       ` David Sterba
2020-11-11  0:08         ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 12/32] btrfs: disk-io: extract the extent buffer verification from btrfs_validate_metadata_buffer() Qu Wenruo
2020-11-05 13:57   ` Nikolay Borisov
2020-11-06 19:03     ` David Sterba
2020-11-09  6:44       ` Qu Wenruo
2020-11-10 14:37         ` David Sterba
2020-11-03 13:30 ` [PATCH 13/32] btrfs: disk-io: accept bvec directly for csum_dirty_buffer() Qu Wenruo
2020-11-05 14:13   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 14/32] btrfs: inode: make btrfs_readpage_end_io_hook() follow sector size Qu Wenruo
2020-11-05 14:28   ` Nikolay Borisov
2020-11-06 19:16     ` David Sterba
2020-11-06 19:20       ` David Sterba
2020-11-06 19:28   ` David Sterba
2020-11-03 13:30 ` [PATCH 15/32] btrfs: introduce a helper to determine if the sectorsize is smaller than PAGE_SIZE Qu Wenruo
2020-11-05 15:01   ` Nikolay Borisov
2020-11-05 22:52     ` Qu Wenruo
2020-11-06 17:28       ` David Sterba
2020-11-07  0:00         ` Qu Wenruo
2020-11-10 14:53           ` David Sterba
2020-11-11  1:34             ` Qu Wenruo
2020-11-11  2:21               ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 16/32] btrfs: extent_io: allow find_first_extent_bit() to find a range with exact bits match Qu Wenruo
2020-11-05 15:03   ` Nikolay Borisov
2020-11-05 22:55     ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 17/32] btrfs: extent_io: don't allow tree block to cross page boundary for subpage support Qu Wenruo
2020-11-06 11:54   ` Nikolay Borisov
2020-11-06 12:03     ` Nikolay Borisov
2020-11-06 13:25     ` Qu Wenruo
2020-11-06 14:04       ` Nikolay Borisov
2020-11-06 23:56         ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 18/32] btrfs: extent_io: update num_extent_pages() to support subpage sized extent buffer Qu Wenruo
2020-11-06 12:09   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 19/32] btrfs: handle sectorsize < PAGE_SIZE case for extent buffer accessors Qu Wenruo
2020-11-06 12:51   ` Nikolay Borisov
2020-11-09  5:49     ` Qu Wenruo
2020-11-03 13:30 ` [PATCH 20/32] btrfs: disk-io: only clear EXTENT_LOCK bit for extent_invalidatepage() Qu Wenruo
2020-11-06 13:17   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 21/32] btrfs: extent-io: make type of extent_state::state to be at least 32 bits Qu Wenruo
2020-11-06 13:38   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 22/32] btrfs: file-item: use nodesize to determine whether we need readahead for btrfs_lookup_bio_sums() Qu Wenruo
2020-11-06 13:55   ` Nikolay Borisov
2020-11-03 13:30 ` [PATCH 23/32] btrfs: file-item: remove the btrfs_find_ordered_sum() call in btrfs_lookup_bio_sums() Qu Wenruo
2020-11-06 14:28   ` Nikolay Borisov
2020-11-03 13:31 ` [PATCH 24/32] btrfs: file-item: refactor btrfs_lookup_bio_sums() to handle out-of-order bvecs Qu Wenruo
2020-11-06 15:22   ` Nikolay Borisov
2020-11-03 13:31 ` Qu Wenruo [this message]
2020-11-03 13:31 ` [PATCH 26/32] btrfs: scrub: remove the @force parameter of scrub_pages() Qu Wenruo
2020-11-03 13:31 ` [PATCH 27/32] btrfs: scrub: use flexible array for scrub_page::csums Qu Wenruo
2020-11-09 17:44   ` David Sterba
2020-11-10  0:53     ` Qu Wenruo
2020-11-10 14:22       ` David Sterba
2020-11-03 13:31 ` [PATCH 28/32] btrfs: scrub: refactor scrub_find_csum() Qu Wenruo
2020-11-03 13:31 ` [PATCH 29/32] btrfs: scrub: introduce scrub_page::page_len for subpage support Qu Wenruo
2020-11-09 18:17   ` David Sterba
2020-11-10  0:54     ` Qu Wenruo
2020-11-09 18:25   ` David Sterba
2020-11-10  0:56     ` Qu Wenruo
2020-11-10 14:27       ` David Sterba
2020-11-03 13:31 ` [PATCH 30/32] btrfs: scrub: always allocate one full page for one sector for RAID56 Qu Wenruo
2020-11-03 13:31 ` [PATCH 31/32] btrfs: scrub: support subpage tree block scrub Qu Wenruo
2020-11-09 18:31   ` David Sterba
2020-11-03 13:31 ` [PATCH 32/32] btrfs: scrub: support subpage data scrub Qu Wenruo
2020-11-05 19:28 ` [PATCH 00/32] btrfs: preparation patches for subpage support Josef Bacik
2020-11-06  0:02   ` Qu Wenruo
