From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-9.8 required=3.0 tests=HEADER_FROM_DIFFERENT_DOMAINS, INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY,SPF_HELO_NONE,SPF_PASS, USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id D74A2C10F29 for ; Tue, 17 Mar 2020 08:13:07 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id B95FD206EC for ; Tue, 17 Mar 2020 08:13:07 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726682AbgCQINH (ORCPT ); Tue, 17 Mar 2020 04:13:07 -0400 Received: from mx2.suse.de ([195.135.220.15]:43172 "EHLO mx2.suse.de" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725536AbgCQING (ORCPT ); Tue, 17 Mar 2020 04:13:06 -0400 X-Virus-Scanned: by amavisd-new at test-mx.suse.de Received: from relay2.suse.de (unknown [195.135.220.254]) by mx2.suse.de (Postfix) with ESMTP id 75000AE3C for ; Tue, 17 Mar 2020 08:13:04 +0000 (UTC) From: Qu Wenruo To: linux-btrfs@vger.kernel.org Subject: [PATCH RFC 34/39] btrfs: qgroup: Introduce qgroup_backref_cache_build() function Date: Tue, 17 Mar 2020 16:11:20 +0800 Message-Id: <20200317081125.36289-35-wqu@suse.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20200317081125.36289-1-wqu@suse.com> References: <20200317081125.36289-1-wqu@suse.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Sender: linux-btrfs-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-btrfs@vger.kernel.org This function is the main function to build the generic purposed backref cache for qgroup. 
The major difference from relocation purposed backref cache is: - No processed extent_io_tree As we don't need to bother the relocation progress - Don't care reloc root Since reloc root doesn't contribute to qgroup accounting, reloc roots are queued to useless list - Always populate backref_node::owner This is the main index for qgroup backref cache to find out the owner of one tree block. The @owner parameter is from tree block header owner, which doesn't reflect reloc tree. But backref cache mechanism will detect reloc tree and remove them from backref cache, thus the header owner is very accurate for qgroup usage. This function will be utilized in incoming patches. Signed-off-by: Qu Wenruo --- fs/btrfs/qgroup.c | 151 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 3e628b5f1c39..0a7aef96aae9 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -22,6 +22,7 @@ #include "extent_io.h" #include "qgroup.h" #include "block-group.h" +#include "misc.h" /* TODO XXX FIXME * - subvol delete -> delete when ref goes to 0? delete limits also?
@@ -1609,6 +1610,156 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+/*
+ * Release every backref node queued in @cache->useless_node.
+ *
+ * Only tree root nodes (no upper edges) may be queued there; per the
+ * commit description these are nodes useless for owner iteration, e.g.
+ * nodes belonging to reloc trees.  Leaf nodes (level 0) are erased from
+ * the cache rb_root and freed; upper level nodes are kept in the cache
+ * but marked detached so later lookups can still reuse them.  Lower
+ * edges of a released node are freed, and any child left with no
+ * remaining upper edge is queued for the same cleanup.
+ *
+ * Return true if @node itself was on the useless list (the caller must
+ * then stop using it), false otherwise.
+ */
+static bool handle_useless_nodes(struct backref_cache *cache,
+				 struct backref_node *node)
+{
+	struct list_head *useless_node = &cache->useless_node;
+	bool ret = false;
+
+	while (!list_empty(useless_node)) {
+		struct backref_node *cur;
+
+		cur = list_first_entry(useless_node, struct backref_node,
+				       list);
+		list_del_init(&cur->list);
+
+		/* Only tree root nodes can be added to @useless_nodes */
+		ASSERT(list_empty(&cur->upper));
+
+		if (cur == node)
+			ret = true;
+
+		/* The node is the lowest node */
+		if (cur->lowest) {
+			list_del_init(&cur->lower);
+			cur->lowest = 0;
+		}
+
+		/* Cleanup the lower edges */
+		while (!list_empty(&cur->lower)) {
+			struct backref_edge *edge;
+			struct backref_node *lower;
+
+			edge = list_entry(cur->lower.next,
+					  struct backref_edge, list[UPPER]);
+			list_del(&edge->list[UPPER]);
+			list_del(&edge->list[LOWER]);
+			lower = edge->node[LOWER];
+			free_backref_edge(cache, edge);
+
+			/* Child node is also orphan, queue for cleanup */
+			if (list_empty(&lower->upper))
+				list_add(&lower->list, useless_node);
+		}
+
+		/*
+		 * Backref nodes for tree leaves are deleted from the cache.
+		 * Backref nodes for upper level tree blocks are left in the
+		 * cache to avoid unnecessary backref lookup.
+		 */
+		if (cur->level > 0) {
+			list_add(&cur->list, &cache->detached);
+			cur->detached = 1;
+		} else {
+			rb_erase(&cur->rb_node, &cache->rb_root);
+			free_backref_node(cache, cur);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Build backref cache for one tree block.
+ *
+ * @node_key:	The first key of the tree block.
+ * @level:	Tree level
+ * @bytenr:	The bytenr of the tree block.
+ * @owner:	The owner from btrfs_header.
+ *
+ * Caller must ensure the tree block belongs to a subvolume tree.
+ *
+ * Return the cached backref_node if the tree block is useful for owner
+ * iteration.
+ *
+ * Return NULL if the tree block doesn't make sense for owner iteration.
+ * (E.g. the tree block belongs to a reloc tree)
+ *
+ * Return ERR_PTR() if something wrong happened.
+ */
+static struct backref_node *qgroup_backref_cache_build(
+		struct btrfs_fs_info *fs_info,
+		struct btrfs_key *node_key,
+		int level, u64 bytenr, u64 owner)
+{
+	struct btrfs_backref_iter *iter;
+	struct backref_cache *cache = fs_info->qgroup_backref_cache;
+	struct btrfs_path *path;
+	struct backref_node *cur;
+	struct backref_node *node = NULL;
+	struct backref_edge *edge;
+	struct rb_node *rb_node;
+	int ret;
+
+	ASSERT(is_fstree(owner));
+
+	rb_node = simple_search(&cache->rb_root, bytenr);
+	/* Already cached, return the cached node directly */
+	if (rb_node)
+		return rb_entry(rb_node, struct backref_node, rb_node);
+
+	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
+	if (!iter)
+		return ERR_PTR(-ENOMEM);
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	node = alloc_backref_node(cache, bytenr, level);
+	if (!node) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	node->owner = owner;
+	node->lowest = 1;
+	cur = node;
+
+	/* Breadth-first search to build backref cache */
+	do {
+		ret = backref_cache_add_tree_block(cache, path, iter, node_key,
+						   cur);
+		if (ret < 0)
+			goto out;
+		edge = list_first_entry_or_null(&cache->pending_edge,
+				struct backref_edge, list[UPPER]);
+		/* the pending list isn't empty, take the first block to process */
+		if (edge) {
+			list_del_init(&edge->list[UPPER]);
+			cur = edge->node[UPPER];
+		}
+	} while (edge);
+
+	/* Finish the upper linkage of newly added edges/nodes */
+	ret = backref_cache_finish_upper_links(cache, node);
+	if (ret < 0)
+		goto out;
+
+	if (handle_useless_nodes(cache, node))
+		node = NULL;
+out:
+	/* iter and path are freed on both the success and error paths */
+	btrfs_backref_iter_free(iter);
+	btrfs_free_path(path);
+	if (ret < 0) {
+		backref_cache_error_cleanup(cache, node);
+		return ERR_PTR(ret);
+	}
+	ASSERT(!node || !node->detached);
+	ASSERT(list_empty(&cache->useless_node) &&
+	       list_empty(&cache->pending_edge));
+	return node;
+}
+
 int
btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, struct btrfs_qgroup_extent_record *qrecord) { -- 2.25.1