From mboxrd@z Thu Jan 1 00:00:00 1970
From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH RFC 29/39] btrfs: Rename finish_upper_links() to backref_cache_finish_upper_links() and move it to backref.c
Date: Tue, 17 Mar 2020 16:11:15 +0800
Message-Id: <20200317081125.36289-30-wqu@suse.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20200317081125.36289-1-wqu@suse.com>
References: <20200317081125.36289-1-wqu@suse.com>
List-ID: <linux-btrfs.vger.kernel.org>

This is the 2nd major part of the generic backref cache. Move
finish_upper_links() to backref.c so we can reuse it.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/backref.c    | 114 +++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/backref.h    |   2 +
 fs/btrfs/relocation.c | 115 +-----------------------------------------
 3 files changed, 117 insertions(+), 114 deletions(-)

diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 013e833bf5bc..0a1cfa4433d3 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2938,3 +2938,117 @@ int backref_cache_add_tree_block(struct backref_cache *cache,
 	btrfs_backref_iter_release(iter);
 	return ret;
 }
+
+/*
+ * In backref_cache_add_tree_block(), we have only linked the lower node to the
+ * edge, but the upper node hasn't been linked to the edge.
+ * This means we can only iterate through backref_node::upper to reach parent
+ * edges, but not through backref_node::lower to reach children edges.
+ *
+ * This function will finish linking backref_node::lower to the related edges,
+ * so that the backref cache can be iterated in both directions.
+ *
+ * Also, this will add the nodes to the backref cache for the next run.
+ */
+int backref_cache_finish_upper_links(struct backref_cache *cache,
+				     struct backref_node *start)
+{
+	struct list_head *useless_node = &cache->useless_node;
+	struct backref_edge *edge;
+	struct rb_node *rb_node;
+	LIST_HEAD(pending_edge);
+
+	ASSERT(start->checked);
+
+	/* Insert this node into the cache if it's not cowonly */
+	if (!start->cowonly) {
+		rb_node = simple_insert(&cache->rb_root, start->bytenr,
+					&start->rb_node);
+		if (rb_node)
+			backref_cache_panic(cache->fs_info, start->bytenr,
+					    -EEXIST);
+		list_add_tail(&start->lower, &cache->leaves);
+	}
+
+	/*
+	 * Use breadth-first search to iterate all related edges.
+	 *
+	 * The start point is all the edges of this node.
+	 */
+	list_for_each_entry(edge, &start->upper, list[LOWER])
+		list_add_tail(&edge->list[UPPER], &pending_edge);
+
+	while (!list_empty(&pending_edge)) {
+		struct backref_node *upper;
+		struct backref_node *lower;
+		struct rb_node *rb_node;
+
+		edge = list_first_entry(&pending_edge, struct backref_edge,
+					list[UPPER]);
+		list_del_init(&edge->list[UPPER]);
+		upper = edge->node[UPPER];
+		lower = edge->node[LOWER];
+
+		/* Parent is detached, no need to keep any edges */
+		if (upper->detached) {
+			list_del(&edge->list[LOWER]);
+			free_backref_edge(cache, edge);
+
+			/* Lower node is orphan, queue for cleanup */
+			if (list_empty(&lower->upper))
+				list_add(&lower->list, useless_node);
+			continue;
+		}
+
+		/*
+		 * All new nodes added in the current build_backref_tree() call
+		 * haven't been linked to the cache rb tree.
+		 * So if we have upper->rb_node populated, this means a cache
+		 * hit. We only need to link the edge, as @upper and all its
+		 * parents have already been linked.
+		 */
+		if (!RB_EMPTY_NODE(&upper->rb_node)) {
+			if (upper->lowest) {
+				list_del_init(&upper->lower);
+				upper->lowest = 0;
+			}
+
+			list_add_tail(&edge->list[UPPER], &upper->lower);
+			continue;
+		}
+
+		/* Sanity check, we shouldn't have any unchecked nodes */
+		if (!upper->checked) {
+			ASSERT(0);
+			return -EUCLEAN;
+		}
+
+		/* Sanity check, a cowonly node must have a cowonly parent */
+		if (start->cowonly != upper->cowonly) {
+			ASSERT(0);
+			return -EUCLEAN;
+		}
+
+		/* Only cache non-cowonly (subvolume trees) tree blocks */
+		if (!upper->cowonly) {
+			rb_node = simple_insert(&cache->rb_root, upper->bytenr,
+						&upper->rb_node);
+			if (rb_node) {
+				backref_cache_panic(cache->fs_info,
+						    upper->bytenr, -EEXIST);
+				return -EUCLEAN;
+			}
+		}
+
+		list_add_tail(&edge->list[UPPER], &upper->lower);
+
+		/*
+		 * Also queue all the parent edges of this uncached node
+		 * to finish the upper linkage.
+		 */
+		list_for_each_entry(edge, &upper->upper, list[LOWER])
+			list_add_tail(&edge->list[UPPER], &pending_edge);
+	}
+	return 0;
+}
+
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index b17bc8d5411a..c6c9f536c359 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -371,4 +371,6 @@ int backref_cache_add_tree_block(struct backref_cache *cache,
 				 struct btrfs_backref_iter *iter,
 				 struct btrfs_key *node_key,
 				 struct backref_node *cur);
+int backref_cache_finish_upper_links(struct backref_cache *cache,
+				     struct backref_node *start);
 #endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index d133a7df90cf..ad3896dcdb48 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -383,119 +383,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
 	return btrfs_get_fs_root(fs_info, &key, false);
 }
 
-/*
- * In backref_cache_add_tree_block(), we have only linked the lower node to the
- * edge, but the upper node hasn't been linked to the edge.
- * This means we can only iterate through backref_node::upper to reach parent
- * edges, but not through backref_node::lower to reach children edges.
- *
- * This function will finish the backref_node::lower to related edges, so that
- * backref cache can be bi-directionally iterated.
- *
- * Also, this will add the nodes to backref cache for next run.
- */
-static int finish_upper_links(struct backref_cache *cache,
-			      struct backref_node *start)
-{
-	struct list_head *useless_node = &cache->useless_node;
-	struct backref_edge *edge;
-	struct rb_node *rb_node;
-	LIST_HEAD(pending_edge);
-
-	ASSERT(start->checked);
-
-	/* Insert this node to cache if it's not cowonly */
-	if (!start->cowonly) {
-		rb_node = simple_insert(&cache->rb_root, start->bytenr,
-					&start->rb_node);
-		if (rb_node)
-			backref_cache_panic(cache->fs_info, start->bytenr,
-					    -EEXIST);
-		list_add_tail(&start->lower, &cache->leaves);
-	}
-
-	/*
-	 * Use breadth first search to iterate all related edges.
-	 *
-	 * The start point is all the edges of this node
-	 */
-	list_for_each_entry(edge, &start->upper, list[LOWER])
-		list_add_tail(&edge->list[UPPER], &pending_edge);
-
-	while (!list_empty(&pending_edge)) {
-		struct backref_node *upper;
-		struct backref_node *lower;
-		struct rb_node *rb_node;
-
-		edge = list_first_entry(&pending_edge, struct backref_edge,
-					list[UPPER]);
-		list_del_init(&edge->list[UPPER]);
-		upper = edge->node[UPPER];
-		lower = edge->node[LOWER];
-
-		/* Parent is detached, no need to keep any edges */
-		if (upper->detached) {
-			list_del(&edge->list[LOWER]);
-			free_backref_edge(cache, edge);
-
-			/* Lower node is orphan, queue for cleanup */
-			if (list_empty(&lower->upper))
-				list_add(&lower->list, useless_node);
-			continue;
-		}
-
-		/*
-		 * All new nodes added in current build_backref_tree() haven't
-		 * been linked to the cache rb tree.
-		 * So if we have upper->rb_node populated, this means a cache
-		 * hit. We only need to link the edge, as @upper and all its
-		 * parent have already been linked.
-		 */
-		if (!RB_EMPTY_NODE(&upper->rb_node)) {
-			if (upper->lowest) {
-				list_del_init(&upper->lower);
-				upper->lowest = 0;
-			}
-
-			list_add_tail(&edge->list[UPPER], &upper->lower);
-			continue;
-		}
-
-		/* Sanity check, we shouldn't have any unchecked nodes */
-		if (!upper->checked) {
-			ASSERT(0);
-			return -EUCLEAN;
-		}
-
-		/* Sanity check, cowonly node has non-cowonly parent */
-		if (start->cowonly != upper->cowonly) {
-			ASSERT(0);
-			return -EUCLEAN;
-		}
-
-		/* Only cache non-cowonly (subvolume trees) tree blocks */
-		if (!upper->cowonly) {
-			rb_node = simple_insert(&cache->rb_root, upper->bytenr,
-						&upper->rb_node);
-			if (rb_node) {
-				backref_cache_panic(cache->fs_info,
-						    upper->bytenr, -EEXIST);
-				return -EUCLEAN;
-			}
-		}
-
-		list_add_tail(&edge->list[UPPER], &upper->lower);
-
-		/*
-		 * Also queue all the parent edges of this uncached node
-		 * to finish the upper linkage
-		 */
-		list_for_each_entry(edge, &upper->upper, list[LOWER])
-			list_add_tail(&edge->list[UPPER], &pending_edge);
-	}
-	return 0;
-}
-
 /*
  * For useless nodes, do two major clean ups:
  * - Cleanup the children edges and nodes
@@ -636,7 +523,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 	} while (edge);
 
 	/* Finish the upper linkage of newly added edges/nodes */
-	ret = finish_upper_links(cache, node);
+	ret = backref_cache_finish_upper_links(cache, node);
 	if (ret < 0) {
 		err = ret;
 		goto out;
-- 
2.25.1
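
Aside (not part of the patch): the data-structure trick that makes the
bidirectional linkage above work is that each backref_edge embeds two list
anchors, list[LOWER] and list[UPPER], so a single edge can sit on the child
node's ->upper list and the parent node's ->lower list at the same time.
backref_cache_add_tree_block() hooks up only the LOWER side;
backref_cache_finish_upper_links() then walks pending edges breadth-first and
hooks up the UPPER side. Below is a minimal standalone userspace sketch of
that dual-anchor, pending-edge BFS pattern, using simplified stand-ins for the
kernel's list_head helpers; node, edge, link_lower and queue_upper_edges are
illustrative names, not btrfs code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del_init(struct list_head *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
	list_init(item);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum { LOWER, UPPER };

struct node {
	const char *name;
	struct list_head upper;		/* edges where this node is the child */
	struct list_head lower;		/* edges where this node is the parent */
};

struct edge {
	struct node *node[2];		/* [LOWER] = child, [UPPER] = parent */
	struct list_head list[2];	/* one anchor per direction */
};

/* Mimic backref_cache_add_tree_block(): link only the LOWER side. */
static void link_lower(struct edge *e, struct node *child, struct node *parent)
{
	e->node[LOWER] = child;
	e->node[UPPER] = parent;
	list_init(&e->list[UPPER]);
	list_add_tail(&e->list[LOWER], &child->upper);
}

/* Queue every edge on @n->upper, reusing each edge's free UPPER anchor. */
static void queue_upper_edges(struct node *n, struct list_head *pending)
{
	struct list_head *cur;

	for (cur = n->upper.next; cur != &n->upper; cur = cur->next)
		list_add_tail(&container_of(cur, struct edge,
					    list[LOWER])->list[UPPER], pending);
}

int main(void)
{
	struct node leaf = { .name = "leaf" };
	struct node mid  = { .name = "mid" };
	struct node root = { .name = "root" };
	struct edge e1, e2;
	struct list_head pending;

	list_init(&leaf.upper); list_init(&leaf.lower);
	list_init(&mid.upper);  list_init(&mid.lower);
	list_init(&root.upper); list_init(&root.lower);
	list_init(&pending);

	link_lower(&e1, &leaf, &mid);	/* leaf is a child of mid */
	link_lower(&e2, &mid, &root);	/* mid is a child of root */

	/* Breadth-first: start from the edges of the lowest node. */
	queue_upper_edges(&leaf, &pending);
	while (!list_empty(&pending)) {
		struct edge *e = container_of(pending.next, struct edge,
					      list[UPPER]);
		struct node *upper = e->node[UPPER];

		list_del_init(&e->list[UPPER]);
		/* Finish the UPPER side: the parent can now reach the child. */
		list_add_tail(&e->list[UPPER], &upper->lower);
		printf("%s -> %s linked both ways\n",
		       e->node[LOWER]->name, upper->name);

		queue_upper_edges(upper, &pending);
	}
	return 0;
}

Running the sketch prints "leaf -> mid linked both ways" and then
"mid -> root linked both ways": the same edge object ends up on both nodes'
lists, which is exactly why the real function can finish the upper links
without allocating anything.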