All of lore.kernel.org
 help / color / mirror / Atom feed
From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH 04/16] btrfs: extract btree inode init code into its own init/exit helpers
Date: Thu, 22 Sep 2022 08:06:21 +0800	[thread overview]
Message-ID: <0e9f2fdef904a97e68eb2665e803abff03284b71.1663804335.git.wqu@suse.com> (raw)
In-Reply-To: <cover.1663804335.git.wqu@suse.com>

Just like how we handle init_btrfs_fs(), here we also use an array to
handle the open_ctree() sequence.

This patch will do the first step by introducing the arrays, and put
btree_inode init/exit code into two helpers:

- open_ctree_btree_inode_init
- open_ctree_btree_inode_exit

[EXPOSED PROBLEMS]
- Bad btrfs_mapping_tree_free() call
  Currently in fail_alloc tag, we call not only iput() on btree_inode, but
  also free the fs_info->mapping_tree, which is not correct.

  As the first time we touch mapping_tree is at btrfs_read_sys_array(),
  thus the btrfs_mapping_tree_free() call is already a label mismatch.

  This will be addressed when we refactor the chunk tree init code.

- Bad invalidate_inode_pages2() call
  After initializing the btree inode, we should invalidate all the page
  cache for metadata before we put the btree inode.

  But the old code is calling invalidate_inode_pages2() before stopping
  all workers.

  This is addressed by this patch as we're really the last exit function
  to be executed, thus we're safe to call invalidate_inode_pages2() then
  iput().

[SPECIAL HANDLING]
After init_mount_fs_info() if we hit some error, we don't need to undo
the work of init_mount_fs_info(), as caller will call btrfs_free_fs_info()
to free it anyway.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/disk-io.c | 116 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 82 insertions(+), 34 deletions(-)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5fbf73daa388..79507c384146 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3290,6 +3290,69 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+static int open_ctree_btree_inode_init(struct btrfs_fs_info *fs_info)
+{
+	int ret;
+
+	/*
+	 * The members initialized inside init_mount_fs_info() will be
+	 * handled in btrfs_free_fs_info() by the caller.
+	 * Thus we don't need to handle its error here.
+	 */
+	ret = init_mount_fs_info(fs_info);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * These need to be init'ed before we start creating inodes and such.
+	 *
+	 * On error, the enomem label frees both roots and resets them to NULL.
+	 */
+	fs_info->tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
+					      GFP_KERNEL);
+	fs_info->chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
+					       GFP_KERNEL);
+	if (!fs_info->tree_root || !fs_info->chunk_root)
+		goto enomem;
+	fs_info->btree_inode = new_inode(fs_info->sb);
+	if (!fs_info->btree_inode)
+		goto enomem;
+
+	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
+	btrfs_init_btree_inode(fs_info);
+	invalidate_bdev(fs_info->fs_devices->latest_dev->bdev);
+	return 0;
+enomem:
+	btrfs_put_root(fs_info->tree_root);
+	btrfs_put_root(fs_info->chunk_root);
+	fs_info->tree_root = NULL;
+	fs_info->chunk_root = NULL;
+	return -ENOMEM;
+}
+
+static void open_ctree_btree_inode_exit(struct btrfs_fs_info *fs_info)
+{
+	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+	iput(fs_info->btree_inode);
+	btrfs_put_root(fs_info->tree_root);
+	btrfs_put_root(fs_info->chunk_root);
+	fs_info->tree_root = NULL;
+	fs_info->chunk_root = NULL;
+}
+
+struct init_sequence {
+	int (*init_func)(struct btrfs_fs_info *fs_info);
+	void (*exit_func)(struct btrfs_fs_info *fs_info);
+};
+
+static const struct init_sequence open_ctree_seq[] = {
+	{
+		.init_func = open_ctree_btree_inode_init,
+		.exit_func = open_ctree_btree_inode_exit,
+	}
+};
+
+
 int __cold open_ctree(struct super_block *sb, char *options)
 {
 	u32 sectorsize;
@@ -3298,47 +3361,26 @@ int __cold open_ctree(struct super_block *sb, char *options)
 	u64 generation;
 	u64 features;
 	u16 csum_type;
+	bool open_ctree_res[ARRAY_SIZE(open_ctree_seq)] = {0};
 	struct btrfs_super_block *disk_super;
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
-	struct btrfs_root *tree_root;
-	struct btrfs_root *chunk_root;
 	int ret;
 	int err = -EINVAL;
 	int level;
+	int i;
 
 	fs_info->sb = sb;
 
 	/* Caller should have already initialized fs_info->fs_devices. */
 	ASSERT(fs_info->fs_devices);
 
-	ret = init_mount_fs_info(fs_info);
-	if (ret) {
-		err = ret;
-		goto fail;
-	}
-
-	/* These need to be init'ed before we start creating inodes and such. */
-	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
-				     GFP_KERNEL);
-	fs_info->tree_root = tree_root;
-	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
-				      GFP_KERNEL);
-	fs_info->chunk_root = chunk_root;
-	if (!tree_root || !chunk_root) {
-		err = -ENOMEM;
-		goto fail;
-	}
-
-	fs_info->btree_inode = new_inode(fs_info->sb);
-	if (!fs_info->btree_inode) {
-		err = -ENOMEM;
-		goto fail;
+	for (i = 0; i < ARRAY_SIZE(open_ctree_seq); i++) {
+		ret = open_ctree_seq[i].init_func(fs_info);
+		if (ret < 0)
+			goto fail;
+		open_ctree_res[i] = true;
 	}
-	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
-	btrfs_init_btree_inode(fs_info);
-
-	invalidate_bdev(fs_devices->latest_dev->bdev);
 
 	/*
 	 * Read super block and check the signature bytes only
@@ -3557,14 +3599,15 @@ int __cold open_ctree(struct super_block *sb, char *options)
 
 	generation = btrfs_super_chunk_root_generation(disk_super);
 	level = btrfs_super_chunk_root_level(disk_super);
-	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
+	ret = load_super_root(fs_info->chunk_root,
+			      btrfs_super_chunk_root(disk_super),
 			      generation, level);
 	if (ret) {
 		btrfs_err(fs_info, "failed to read chunk root");
 		goto fail_tree_roots;
 	}
 
-	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
+	read_extent_buffer(fs_info->chunk_root->node, fs_info->chunk_tree_uuid,
 			   offsetof(struct btrfs_header, chunk_tree_uuid),
 			   BTRFS_UUID_SIZE);
 
@@ -3688,7 +3731,7 @@ int __cold open_ctree(struct super_block *sb, char *options)
 		goto fail_sysfs;
 
 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
-						   tree_root,
+						   fs_info->tree_root,
 						   "btrfs-transaction");
 	if (IS_ERR(fs_info->transaction_kthread))
 		goto fail_cleaner;
@@ -3803,17 +3846,22 @@ int __cold open_ctree(struct super_block *sb, char *options)
 	if (fs_info->data_reloc_root)
 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
 	free_root_pointers(fs_info, true);
-	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 
 fail_sb_buffer:
 	btrfs_stop_all_workers(fs_info);
 	btrfs_free_block_groups(fs_info);
 fail_alloc:
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
-
-	iput(fs_info->btree_inode);
 fail:
+	for (i = ARRAY_SIZE(open_ctree_seq) - 1; i >= 0; i--) {
+		if (!open_ctree_res[i] || !open_ctree_seq[i].exit_func)
+			continue;
+		open_ctree_seq[i].exit_func(fs_info);
+		open_ctree_res[i] = false;
+	}
 	btrfs_close_devices(fs_info->fs_devices);
+	if (ret < 0)
+		err = ret;
 	return err;
 }
 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
-- 
2.37.3


  parent reply	other threads:[~2022-09-22  0:07 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-22  0:06 [PATCH 00/16] btrfs: make open_ctree() init/exit sequence strictly matched Qu Wenruo
2022-09-22  0:06 ` [PATCH 01/16] btrfs: make btrfs module init/exit match their sequence Qu Wenruo
2022-09-22  0:06 ` [PATCH 02/16] btrfs: initialize fs_info->sb at the very beginning of open_ctree() Qu Wenruo
2022-09-22  0:06 ` [PATCH 03/16] btrfs: remove @fs_devices argument from open_ctree() Qu Wenruo
2022-09-22  0:06 ` Qu Wenruo [this message]
2022-09-22  0:06 ` [PATCH 05/16] btrfs: extract super block read code into its own init helper Qu Wenruo
2022-09-22  0:06 ` [PATCH 06/16] btrfs: extract mount options and features init " Qu Wenruo
2022-09-22  0:28   ` Qu Wenruo
2022-09-22  0:06 ` [PATCH 07/16] btrfs: move btrfs_init_workqueus() and btrfs_stop_all_workers() into open_ctree_seq[] Qu Wenruo
2022-09-22  0:06 ` [PATCH 08/16] btrfs: extract chunk tree read code into its own init/exit helpers Qu Wenruo
2022-09-22  0:06 ` [PATCH 09/16] btrfs: extract tree roots and zone info initialization into " Qu Wenruo
2022-09-22  0:06 ` [PATCH 10/16] btrfs: extract mount time checks and items load code into its init helper Qu Wenruo
2022-09-22  0:06 ` [PATCH 11/16] btrfs: extract sysfs init into its own helper Qu Wenruo
2022-09-22  0:06 ` [PATCH 12/16] btrfs: extra block groups read code into its own init/exit helpers Qu Wenruo
2022-09-22  0:06 ` [PATCH 13/16] btrfs: move the fs root related " Qu Wenruo
2022-09-22  0:06 ` [PATCH 14/16] btrfs: extract kthread " Qu Wenruo
2022-09-22  0:06 ` [PATCH 15/16] btrfs: move qgroup init/exit code into open_ctree_seq[] array Qu Wenruo
2022-09-22  0:06 ` [PATCH 16/16] btrfs: introduce a debug mount option to do error injection for each stage of open_ctree() Qu Wenruo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0e9f2fdef904a97e68eb2665e803abff03284b71.1663804335.git.wqu@suse.com \
    --to=wqu@suse.com \
    --cc=linux-btrfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.