From: Naohiro Aota <naohiro.aota@wdc.com>
To: linux-btrfs@vger.kernel.org, dsterba@suse.com
Cc: hare@suse.com, linux-fsdevel@vger.kernel.org,
	Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@infradead.org>,
	"Darrick J. Wong" <darrick.wong@oracle.com>,
	Naohiro Aota <naohiro.aota@wdc.com>
Subject: [PATCH v11 12/40] btrfs: calculate allocation offset for conventional zones
Date: Tue, 22 Dec 2020 12:49:05 +0900
Message-ID: <5101ed472a046b3fc691aeb90f84bb55790d4fc0.1608608848.git.naohiro.aota@wdc.com>
In-Reply-To: <06add214bc16ef08214de1594ecdfcc4cdcdbd78.1608608848.git.naohiro.aota@wdc.com>

Conventional zones do not have a write pointer, so we cannot use it to
determine the allocation offset if a block group contains a conventional
zone.

Instead, we can consider the end of the last allocated extent in the
block group as the allocation offset.

For a new block group, we cannot calculate the allocation offset by
consulting the extent tree, because doing so would take an extent
buffer lock after the chunk mutex (which is already held in
btrfs_make_block_group()) and could deadlock. Since the block group is
new, we can simply set the allocation offset to 0 anyway.
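
As a rough illustration (not part of the patch; the names below, such
as calc_alloc_offset() and struct extent, are hypothetical and stand
in for the real extent tree walk done by calculate_alloc_pointer()
further down), the idea can be modeled in plain userspace C: the
allocation offset is the end of the last extent that lies inside the
block group, relative to the block group start, or 0 when nothing has
been allocated yet:

/*
 * Simplified userspace model (hypothetical types, not kernel code).
 * The allocation offset is the end of the last extent inside the
 * block group, relative to the block group start, or 0 when no
 * extent has been allocated yet (e.g. a brand-new block group).
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct extent {			/* [start, start + len) in logical bytes */
	uint64_t start;
	uint64_t len;
};

/* Extents are assumed sorted by start, as in the extent tree. */
static uint64_t calc_alloc_offset(uint64_t bg_start, uint64_t bg_len,
				  const struct extent *extents, size_t nr)
{
	uint64_t offset = 0;

	for (size_t i = 0; i < nr; i++) {
		uint64_t end = extents[i].start + extents[i].len;

		/* Skip extents that do not belong to this block group. */
		if (extents[i].start < bg_start || end > bg_start + bg_len)
			continue;
		offset = end - bg_start;	/* last in-range extent wins */
	}
	return offset;
}

int main(void)
{
	/* Block group at 1 GiB, 256 MiB long, with two allocated extents. */
	const struct extent extents[] = {
		{ 1ULL << 30, 16ULL << 20 },
		{ (1ULL << 30) + (32ULL << 20), 8ULL << 20 },
	};

	printf("alloc_offset = %llu bytes\n",
	       (unsigned long long)calc_alloc_offset(1ULL << 30, 256ULL << 20,
						     extents, 2));
	return 0;
}

The patch below computes the same value with calculate_alloc_pointer(),
which searches the extent tree backwards from the end of the block
group instead of scanning a flat array.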

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/block-group.c |  4 +-
 fs/btrfs/zoned.c       | 93 +++++++++++++++++++++++++++++++++++++++---
 fs/btrfs/zoned.h       |  4 +-
 3 files changed, 92 insertions(+), 9 deletions(-)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 8c029e45a573..9eb1e3aa5e0f 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1867,7 +1867,7 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 			goto error;
 	}
 
-	ret = btrfs_load_block_group_zone_info(cache);
+	ret = btrfs_load_block_group_zone_info(cache, false);
 	if (ret) {
 		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
 			  cache->start);
@@ -2150,7 +2150,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 		cache->needs_free_space = 1;
 
-	ret = btrfs_load_block_group_zone_info(cache);
+	ret = btrfs_load_block_group_zone_info(cache, true);
 	if (ret) {
 		btrfs_put_block_group(cache);
 		return ret;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index adca89a5ebc1..ceb6d0d7d33b 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -897,7 +897,62 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
 	return 0;
 }
 
-int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
+static int calculate_alloc_pointer(struct btrfs_block_group *cache,
+				   u64 *offset_ret)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct btrfs_root *root = fs_info->extent_root;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	int ret;
+	u64 length;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = cache->start + cache->length;
+	key.type = 0;
+	key.offset = 0;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	/* We should not find the exact match */
+	if (ret <= 0) {
+		ret = -EUCLEAN;
+		goto out;
+	}
+
+	ret = btrfs_previous_extent_item(root, path, cache->start);
+	if (ret) {
+		if (ret == 1) {
+			ret = 0;
+			*offset_ret = 0;
+		}
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+
+	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
+		length = found_key.offset;
+	else
+		length = fs_info->nodesize;
+
+	if (!(found_key.objectid >= cache->start &&
+	       found_key.objectid + length <= cache->start + cache->length)) {
+		ret = -EUCLEAN;
+		goto out;
+	}
+	*offset_ret = found_key.objectid + length - cache->start;
+	ret = 0;
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
@@ -911,6 +966,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 	int i;
 	unsigned int nofs_flag;
 	u64 *alloc_offsets = NULL;
+	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
 
 	if (!btrfs_is_zoned(fs_info))
@@ -1013,11 +1069,30 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 
 	if (num_conventional > 0) {
 		/*
-		 * Since conventional zones do not have a write pointer, we
-		 * cannot determine alloc_offset from the pointer
+		 * Avoid calling calculate_alloc_pointer() for new BG. It
+		 * is no use for new BG. It must be always 0.
+		 *
+		 * Also, we have a lock chain of extent buffer lock ->
+		 * chunk mutex.  For new BG, this function is called from
+		 * btrfs_make_block_group() which is already taking the
+		 * chunk mutex. Thus, we cannot call
+		 * calculate_alloc_pointer() which takes extent buffer
+		 * locks to avoid deadlock.
 		 */
-		ret = -EINVAL;
-		goto out;
+		if (new) {
+			cache->alloc_offset = 0;
+			goto out;
+		}
+		ret = calculate_alloc_pointer(cache, &last_alloc);
+		if (ret || map->num_stripes == num_conventional) {
+			if (!ret)
+				cache->alloc_offset = last_alloc;
+			else
+				btrfs_err(fs_info,
+			"zoned: failed to determine allocation offset of bg %llu",
+					  cache->start);
+			goto out;
+		}
 	}
 
 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
@@ -1039,6 +1114,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 	}
 
 out:
+	/* An extent is allocated after the write pointer */
+	if (num_conventional && last_alloc > cache->alloc_offset) {
+		btrfs_err(fs_info,
+			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
+			  logical, last_alloc, cache->alloc_offset);
+		ret = -EIO;
+	}
+
 	kfree(alloc_offsets);
 	free_extent_map(em);
 
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 491b98c97f48..b53403ba0b10 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -41,7 +41,7 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
 			    u64 length, u64 *bytes);
 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
-int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache);
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -119,7 +119,7 @@ static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
 }
 
 static inline int btrfs_load_block_group_zone_info(
-	struct btrfs_block_group *cache)
+	struct btrfs_block_group *cache, bool new)
 {
 	return 0;
 }
-- 
2.27.0


Thread overview: 86+ messages
2020-12-22  3:48 [PATCH v11 00/40] btrfs: zoned block device support Naohiro Aota
2020-12-22  3:48 ` [PATCH v11 01/40] block: add bio_add_zone_append_page Naohiro Aota
2020-12-22  3:48   ` [PATCH v11 02/40] iomap: support REQ_OP_ZONE_APPEND Naohiro Aota
2021-01-04 22:30     ` Darrick J. Wong
2021-01-13  9:34       ` Johannes Thumshirn
2020-12-22  3:48   ` [PATCH v11 03/40] btrfs: defer loading zone info after opening trees Naohiro Aota
2021-01-11 19:16     ` Josef Bacik
2020-12-22  3:48   ` [PATCH v11 04/40] btrfs: change superblock location on conventional zone Naohiro Aota
2021-01-11 19:47     ` Josef Bacik
2021-01-14 15:10       ` Naohiro Aota
2020-12-22  3:48   ` [PATCH v11 05/40] btrfs: release path before calling into btrfs_load_block_group_zone_info Naohiro Aota
2021-01-11 20:01     ` Josef Bacik
2021-01-12  8:05       ` Johannes Thumshirn
2020-12-22  3:48   ` [PATCH v11 06/40] btrfs: do not load fs_info->zoned from incompat flag Naohiro Aota
2021-01-11 20:08     ` Josef Bacik
2021-01-12  8:00       ` Johannes Thumshirn
2020-12-22  3:49   ` [PATCH v11 07/40] btrfs: disallow fitrim in ZONED mode Naohiro Aota
2021-01-11 20:12     ` Josef Bacik
2021-01-12 10:19       ` Johannes Thumshirn
2020-12-22  3:49   ` [PATCH v11 08/40] btrfs: emulated zoned mode on non-zoned devices Naohiro Aota
2021-01-11 20:54     ` Josef Bacik
2021-01-13 17:58     ` David Sterba
2021-01-13 18:23       ` Johannes Thumshirn
2020-12-22  3:49   ` [PATCH v11 09/40] btrfs: implement zoned chunk allocator Naohiro Aota
2021-01-11 21:24     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 10/40] btrfs: verify device extent is aligned to zone Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 11/40] btrfs: load zone's allocation offset Naohiro Aota
2020-12-22  3:49   ` Naohiro Aota [this message]
2021-01-12 15:12     ` [PATCH v11 12/40] btrfs: calculate allocation offset for conventional zones Josef Bacik
2020-12-22  3:49   ` [PATCH v11 13/40] btrfs: track unusable bytes for zones Naohiro Aota
2021-01-12 15:45     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 14/40] btrfs: do sequential extent allocation in ZONED mode Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 15/40] btrfs: redirty released extent buffers " Naohiro Aota
2021-01-12 15:51     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 16/40] btrfs: advance allocation pointer after tree log node Naohiro Aota
2021-01-12 15:52     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 17/40] btrfs: enable to mount ZONED incompat flag Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 18/40] btrfs: reset zones of unused block groups Naohiro Aota
2021-01-12 15:54     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 19/40] btrfs: extract page adding function Naohiro Aota
2021-01-11 16:22     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 20/40] btrfs: use bio_add_zone_append_page for zoned btrfs Naohiro Aota
2021-01-12 15:55     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 21/40] btrfs: handle REQ_OP_ZONE_APPEND as writing Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 22/40] btrfs: split ordered extent when bio is sent Naohiro Aota
2021-01-12 15:59     ` Josef Bacik
2021-01-13 10:05       ` Johannes Thumshirn
2021-01-15  7:08     ` Su Yue
2020-12-22  3:49   ` [PATCH v11 23/40] btrfs: extend btrfs_rmap_block for specifying a device Naohiro Aota
2021-01-12 16:00     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 24/40] btrfs: cache if block-group is on a sequential zone Naohiro Aota
2021-01-12 16:01     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 25/40] btrfs: use ZONE_APPEND write for ZONED btrfs Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 26/40] btrfs: enable zone append writing for direct IO Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 27/40] btrfs: introduce dedicated data write path for ZONED mode Naohiro Aota
2021-01-12 19:24     ` Josef Bacik
2021-01-13 10:41       ` Naohiro Aota
2021-01-12 19:28     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 28/40] btrfs: serialize meta IOs on " Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 29/40] btrfs: wait existing extents before truncating Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 30/40] btrfs: avoid async metadata checksum on ZONED mode Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 31/40] btrfs: mark block groups to copy for device-replace Naohiro Aota
2021-01-12 19:30     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 32/40] btrfs: implement cloning for ZONED device-replace Naohiro Aota
2021-01-12 19:36     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 33/40] btrfs: implement copying " Naohiro Aota
2021-01-12 19:37     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 34/40] btrfs: support dev-replace in ZONED mode Naohiro Aota
2021-01-12 19:37     ` Josef Bacik
2021-01-12 19:40     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 35/40] btrfs: enable relocation " Naohiro Aota
2021-01-12 19:43     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 36/40] btrfs: relocate block group to repair IO failure in ZONED Naohiro Aota
2020-12-22  3:49   ` [PATCH v11 37/40] btrfs: split alloc_log_tree() Naohiro Aota
2021-01-12 19:44     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 38/40] btrfs: extend zoned allocator to use dedicated tree-log block group Naohiro Aota
2021-01-12 19:48     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 39/40] btrfs: serialize log transaction on ZONED mode Naohiro Aota
2021-01-12 19:50     ` Josef Bacik
2020-12-22  3:49   ` [PATCH v11 40/40] btrfs: reorder log node allocation Naohiro Aota
2020-12-22 13:35   ` [PATCH v11 01/40] block: add bio_add_zone_append_page Christoph Hellwig
2021-01-12 13:48     ` Johannes Thumshirn
2020-12-22 13:38 ` [PATCH v11 00/40] btrfs: zoned block device support Christoph Hellwig
2021-01-11 10:17   ` Johannes Thumshirn
2021-01-12 10:23     ` hch
2021-01-12 10:26       ` Johannes Thumshirn
