From: Naohiro Aota <naohiro.aota@wdc.com>
To: linux-btrfs@vger.kernel.org, David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>, Josef Bacik <josef@toxicpanda.com>,
	Hannes Reinecke <hare@suse.com>,
	linux-fsdevel@vger.kernel.org,
	Naohiro Aota <naohiro.aota@wdc.com>
Subject: [PATCH v7 31/39] btrfs: implement copying for ZONED device-replace
Date: Fri, 11 Sep 2020 21:32:51 +0900
Message-ID: <20200911123259.3782926-32-naohiro.aota@wdc.com>
In-Reply-To: <20200911123259.3782926-1-naohiro.aota@wdc.com>

This is the third of four patches to implement device-replace on ZONED mode.

This commit implements copying. To do this, it tracks the write pointer
during the device-replace process. Since device-replace's copying is smart
enough to copy only the used extents on the source device, we have to fill
the gaps between them to honor the sequential write rule on the target
device.
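
For illustration (offsets are hypothetical), the layout on the target
device around such a gap looks like this:

  |<--- already copied --->|<--- gap (zeroed) --->|<--- next extent --->|
  ^                        ^                      ^
  zone start        sctx->write_pointer        physical

fill_writer_pointer_gap() zeroes out the range [write_pointer, physical)
so that the copy of the next extent starts exactly at the zone's write
pointer.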

The device-replace process in ZONED mode must copy or clone all the extents
in the source device exactly once. So, we need to ensure that allocations
started just before the dev-replace process have their corresponding extent
information in the B-trees. finish_extent_writes_for_zoned() implements
that functionality, which basically is the code removed in commit
042528f8d840 ("Btrfs: fix block group remaining RO forever after error
during device replace").
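
In short, the sequence implemented there is:

  1. wait for outstanding block group reservations
  2. wait for in-flight NOCOW writers on the block group
  3. wait for ordered extents in the block group's range
  4. join and commit a transaction

so that every allocation started before the replace has its extent item
committed to the B-trees before copying begins.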

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/scrub.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/zoned.c | 12 +++++++
 fs/btrfs/zoned.h |  7 ++++
 3 files changed, 105 insertions(+)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f7d750b32cfb..568d90214446 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -169,6 +169,7 @@ struct scrub_ctx {
 	int			pages_per_rd_bio;
 
 	int			is_dev_replace;
+	u64			write_pointer;
 
 	struct scrub_bio        *wr_curr_bio;
 	struct mutex            wr_lock;
@@ -1623,6 +1624,25 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
 	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
 }
 
+static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
+{
+	int ret = 0;
+	u64 length;
+
+	if (!btrfs_fs_incompat(sctx->fs_info, ZONED))
+		return 0;
+
+	if (sctx->write_pointer < physical) {
+		length = physical - sctx->write_pointer;
+
+		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
+						sctx->write_pointer, length);
+		if (!ret)
+			sctx->write_pointer = physical;
+	}
+	return ret;
+}
+
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage)
 {
@@ -1645,6 +1665,13 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	if (sbio->page_count == 0) {
 		struct bio *bio;
 
+		ret = fill_writer_pointer_gap(sctx,
+					      spage->physical_for_dev_replace);
+		if (ret) {
+			mutex_unlock(&sctx->wr_lock);
+			return ret;
+		}
+
 		sbio->physical = spage->physical_for_dev_replace;
 		sbio->logical = spage->logical;
 		sbio->dev = sctx->wr_tgtdev;
@@ -1706,6 +1733,10 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 	 * doubled the write performance on spinning disks when measured
 	 * with Linux 3.5 */
 	btrfsic_submit_bio(sbio->bio);
+
+	if (btrfs_fs_incompat(sctx->fs_info, ZONED))
+		sctx->write_pointer = sbio->physical +
+			sbio->page_count * PAGE_SIZE;
 }
 
 static void scrub_wr_bio_end_io(struct bio *bio)
@@ -2973,6 +3004,21 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 	return ret < 0 ? ret : 0;
 }
 
+static void sync_replace_for_zoned(struct scrub_ctx *sctx)
+{
+	if (!btrfs_fs_incompat(sctx->fs_info, ZONED))
+		return;
+
+	sctx->flush_all_writes = true;
+	scrub_submit(sctx);
+	mutex_lock(&sctx->wr_lock);
+	scrub_wr_submit(sctx);
+	mutex_unlock(&sctx->wr_lock);
+
+	wait_event(sctx->list_wait,
+		   atomic_read(&sctx->bios_in_flight) == 0);
+}
+
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct map_lookup *map,
 					   struct btrfs_device *scrub_dev,
@@ -3105,6 +3151,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	 */
 	blk_start_plug(&plug);
 
+	if (sctx->is_dev_replace &&
+	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
+		mutex_lock(&sctx->wr_lock);
+		sctx->write_pointer = physical;
+		mutex_unlock(&sctx->wr_lock);
+		sctx->flush_all_writes = true;
+	}
+
 	/*
 	 * now find all extents for each stripe and scrub them
 	 */
@@ -3292,6 +3346,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			if (ret)
 				goto out;
 
+			if (sctx->is_dev_replace)
+				sync_replace_for_zoned(sctx);
+
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
@@ -3414,6 +3471,25 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 	return ret;
 }
 
+static int finish_extent_writes_for_zoned(struct btrfs_root *root,
+					  struct btrfs_block_group *cache)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct btrfs_trans_handle *trans;
+
+	if (!btrfs_fs_incompat(fs_info, ZONED))
+		return 0;
+
+	btrfs_wait_block_group_reservations(cache);
+	btrfs_wait_nocow_writers(cache);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+	return btrfs_commit_transaction(trans);
+}
+
 static noinline_for_stack
 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 			   struct btrfs_device *scrub_dev, u64 start, u64 end)
@@ -3569,6 +3645,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * group is not RO.
 		 */
 		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
+		if (!ret && sctx->is_dev_replace) {
+			ret = finish_extent_writes_for_zoned(root, cache);
+			if (ret) {
+				btrfs_dec_block_group_ro(cache);
+				scrub_pause_off(fs_info);
+				btrfs_put_block_group(cache);
+				break;
+			}
+		}
+
 		if (ret == 0) {
 			ro_set = 1;
 		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 2fe659bb0709..ac88d26f1119 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1196,3 +1196,15 @@ void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
 	cache->meta_write_pointer = eb->start;
 }
+
+int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical,
+			      u64 length)
+{
+	if (!btrfs_dev_is_sequential(device, physical))
+		return -EOPNOTSUPP;
+
+	return blkdev_issue_zeroout(device->bdev,
+				    physical >> SECTOR_SHIFT,
+				    length >> SECTOR_SHIFT,
+				    GFP_NOFS, 0);
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 5d4b132a4d95..dea313a61a3e 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -58,6 +58,8 @@ bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 				    struct btrfs_block_group **cache_ret);
 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 				     struct extent_buffer *eb);
+int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical,
+			      u64 length);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -147,6 +149,11 @@ btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 				struct extent_buffer *eb)
 {
 }
+static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
+					    u64 physical, u64 length)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
-- 
2.27.0


Thread overview: 51+ messages
2020-09-11 12:32 [PATCH v7 00/39] btrfs: zoned block device support Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 01/39] btrfs: introduce ZONED feature flag Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 02/39] btrfs: Get zone information of zoned block devices Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 03/39] btrfs: Check and enable ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 04/39] btrfs: introduce max_zone_append_size Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 05/39] btrfs: disallow space_cache in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 06/39] btrfs: disallow NODATACOW " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 07/39] btrfs: disable fallocate " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 08/39] btrfs: disallow mixed-bg " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 09/39] btrfs: disallow inode_cache " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 10/39] btrfs: implement log-structured superblock for " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 11/39] btrfs: implement zoned chunk allocator Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 12/39] btrfs: verify device extent is aligned to zone Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 13/39] btrfs: load zone's alloction offset Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 14/39] btrfs: emulate write pointer for conventional zones Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 15/39] btrfs: track unusable bytes for zones Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 16/39] btrfs: do sequential extent allocation in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 17/39] btrfs: reset zones of unused block groups Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 18/39] btrfs: redirty released extent buffers in ZONED mode Naohiro Aota
2020-09-14 11:48   ` Johannes Thumshirn
2020-09-11 12:32 ` [PATCH v7 19/39] btrfs: limit bio size under max_zone_append_size Naohiro Aota
2020-09-11 14:17   ` Christoph Hellwig
2020-09-12  4:14     ` Naohiro Aota
2020-09-12  5:30       ` Christoph Hellwig
2020-09-17  5:32         ` Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 20/39] btrfs: limit ordered extent size to max_zone_append_size Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 21/39] btrfs: extend btrfs_rmap_block for specifying a device Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 22/39] btrfs: use ZONE_APPEND write for ZONED btrfs Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 23/39] btrfs: handle REQ_OP_ZONE_APPEND as writing Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 24/39] btrfs: enable zone append writing for direct IO Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 25/39] btrfs: introduce dedicated data write path for ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 26/39] btrfs: serialize meta IOs on " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 27/39] btrfs: wait existing extents before truncating Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 28/39] btrfs: avoid async metadata checksum on ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 29/39] btrfs: mark block groups to copy for device-replace Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 30/39] btrfs: implement cloning for ZONED device-replace Naohiro Aota
2020-09-11 12:32 ` Naohiro Aota [this message]
2020-09-11 12:32 ` [PATCH v7 32/39] btrfs: support dev-replace in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 33/39] btrfs: enable relocation " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 34/39] btrfs: relocate block group to repair IO failure in ZONED Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 35/39] btrfs: split alloc_log_tree() Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 36/39] btrfs: extend zoned allocator to use dedicated tree-log block group Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 37/39] btrfs: serialize log transaction on ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 38/39] btrfs: reorder log node allocation Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 39/39] btrfs: enable to mount ZONED incompat flag Naohiro Aota
2020-09-15  8:09 ` [PATCH v7 00/39] btrfs: zoned block device support David Sterba
2020-09-16 17:42   ` Johannes Thumshirn
2020-09-16 19:46     ` David Sterba
2020-09-16 19:50       ` Johannes Thumshirn
2020-09-17  5:40     ` Naohiro Aota
2020-09-17  7:14       ` Johannes Thumshirn
