From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH v2 33/39] btrfs: qgroup: Introduce qgroup backref cache
Date: Thu, 26 Mar 2020 16:33:10 +0800
Message-Id: <20200326083316.48847-34-wqu@suse.com>
X-Mailer: git-send-email 2.26.0
In-Reply-To: <20200326083316.48847-1-wqu@suse.com>
References: <20200326083316.48847-1-wqu@suse.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

This adds two new members to btrfs_fs_info:

- struct btrfs_backref_cache *qgroup_backref_cache
  Only gets initialized at qgroup enable time, so that the fs_info
  structure is not bloated further when qgroups are not in use.

- struct mutex qgroup_backref_lock
  Initialized at fs_info init time.

This patch only introduces the skeleton: initialization and cleanup of
the newly added members, with no users of them yet.
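In outline, the cache follows a lazy allocate-on-enable / free-on-disable
lifecycle and is only ever touched under qgroup_backref_lock. A simplified
sketch of the two paths (the hunks below are authoritative; the reading of
the final 0 argument to btrfs_backref_init_cache as "not the
relocation-owned cache" is taken from the earlier patches in this series):

	/* qgroup enable / config read path (sketch) */
	mutex_lock(&fs_info->qgroup_backref_lock);
	if (!fs_info->qgroup_backref_cache) {
		struct btrfs_backref_cache *cache;

		cache = kzalloc(sizeof(*cache), GFP_KERNEL);
		if (!cache) {
			mutex_unlock(&fs_info->qgroup_backref_lock);
			return -ENOMEM;
		}
		btrfs_backref_init_cache(fs_info, cache, 0);
		fs_info->qgroup_backref_cache = cache;
	}
	mutex_unlock(&fs_info->qgroup_backref_lock);

	/* qgroup disable / unmount path (sketch) */
	mutex_lock(&fs_info->qgroup_backref_lock);
	if (fs_info->qgroup_backref_cache) {
		btrfs_backref_release_cache(fs_info->qgroup_backref_cache);
		kfree(fs_info->qgroup_backref_cache);
		fs_info->qgroup_backref_cache = NULL;
	}
	mutex_unlock(&fs_info->qgroup_backref_lock);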
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/ctree.h   |  2 ++
 fs/btrfs/disk-io.c |  1 +
 fs/btrfs/qgroup.c  | 53 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 01b03e8a671f..70e90b549d3e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -888,6 +888,8 @@ struct btrfs_fs_info {
 	struct btrfs_workqueue *qgroup_rescan_workers;
 	struct completion qgroup_rescan_completion;
 	struct btrfs_work qgroup_rescan_work;
+	struct mutex qgroup_backref_lock;
+	struct btrfs_backref_cache *qgroup_backref_cache;
 	bool qgroup_rescan_running;	/* protected by qgroup_rescan_lock */
 
 	/* filesystem state */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a6cb5cbbdb9f..e79d287c362f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2107,6 +2107,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
 	fs_info->qgroup_ulist = NULL;
 	fs_info->qgroup_rescan_running = false;
 	mutex_init(&fs_info->qgroup_rescan_lock);
+	mutex_init(&fs_info->qgroup_backref_lock);
 }
 
 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c3888fb367e7..31b320860b71 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -339,6 +339,19 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		return 0;
 
+	mutex_lock(&fs_info->qgroup_backref_lock);
+	if (!fs_info->qgroup_backref_cache) {
+		fs_info->qgroup_backref_cache = kzalloc(
+				sizeof(struct btrfs_backref_cache), GFP_KERNEL);
+		if (!fs_info->qgroup_backref_cache) {
+			mutex_unlock(&fs_info->qgroup_backref_lock);
+			return -ENOMEM;
+		}
+		btrfs_backref_init_cache(fs_info,
+				fs_info->qgroup_backref_cache, 0);
+	}
+	mutex_unlock(&fs_info->qgroup_backref_lock);
+
 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 	if (!fs_info->qgroup_ulist) {
 		ret = -ENOMEM;
@@ -528,6 +541,14 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 	 */
 	ulist_free(fs_info->qgroup_ulist);
 	fs_info->qgroup_ulist = NULL;
+
+	mutex_lock(&fs_info->qgroup_backref_lock);
+	if (fs_info->qgroup_backref_cache) {
+		btrfs_backref_release_cache(fs_info->qgroup_backref_cache);
+		kfree(fs_info->qgroup_backref_cache);
+		fs_info->qgroup_backref_cache = NULL;
+	}
+	mutex_unlock(&fs_info->qgroup_backref_lock);
 }
 
 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
@@ -891,6 +912,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
 	int slot;
 
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
+	mutex_lock(&fs_info->qgroup_backref_lock);
+	if (!fs_info->qgroup_backref_cache) {
+		fs_info->qgroup_backref_cache = kzalloc(
+				sizeof(struct btrfs_backref_cache), GFP_KERNEL);
+		if (!fs_info->qgroup_backref_cache) {
+			mutex_unlock(&fs_info->qgroup_backref_lock);
+			ret = -ENOMEM;
+			goto out;
+		}
+		btrfs_backref_init_cache(fs_info, fs_info->qgroup_backref_cache,
+					 0);
+	}
+	mutex_unlock(&fs_info->qgroup_backref_lock);
+
 	if (fs_info->quota_root)
 		goto out;
 
@@ -1095,6 +1130,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
 		goto end_trans;
 	}
 
+	mutex_lock(&fs_info->qgroup_backref_lock);
+	if (fs_info->qgroup_backref_cache) {
+		btrfs_backref_release_cache(fs_info->qgroup_backref_cache);
+		kfree(fs_info->qgroup_backref_cache);
+		fs_info->qgroup_backref_cache = NULL;
+	}
+	mutex_unlock(&fs_info->qgroup_backref_lock);
+
 	list_del(&quota_root->dirty_list);
 
 	btrfs_tree_lock(quota_root->node);
@@ -2561,6 +2604,16 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 	}
 
 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
+
+	/*
+	 * Qgroup accounting happens at commit transaction time, thus the
+	 * backref cache will no longer be valid in next trans.
+	 * Free it up.
+	 */
+	mutex_lock(&fs_info->qgroup_backref_lock);
+	if (fs_info->qgroup_backref_cache)
+		btrfs_backref_release_cache(fs_info->qgroup_backref_cache);
+	mutex_unlock(&fs_info->qgroup_backref_lock);
 	return ret;
 }
 
-- 
2.26.0