From: qiang.zhang@windriver.com
To: viro@zeniv.linux.org.uk, axboe@kernel.dk
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH] fs: simplify superblock destruction with queue_rcu_work()
Date: Mon, 17 May 2021 15:41:17 +0800
Message-ID: <20210517074117.7748-1-qiang.zhang@windriver.com>

From: Zqiang <qiang.zhang@windriver.com>

Simplify superblock destruction by switching to queue_rcu_work().

Replace the separate rcu_head and work_struct in struct super_block
with a single rcu_work, and let queue_rcu_work() wait out the RCU
grace period before queueing destroy_super_work(). This makes the
intermediate destroy_super_rcu() callback, whose only job was to
schedule that work from RCU callback context, unnecessary. Queueing
on system_wq keeps the work running in the same context as the old
schedule_work() call.

Signed-off-by: Zqiang <qiang.zhang@windriver.com>
---
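For context, the rcu_work pattern this patch moves to looks roughly
like the sketch below (the foo names are illustrative only, not the
actual fs/super.c code): INIT_RCU_WORK() binds a handler to the
embedded work_struct, queue_rcu_work() queues that work only after an
RCU grace period has elapsed, and to_rcu_work() recovers the
containing rcu_work inside the handler.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo {
	struct rcu_work rcu_work;
	/* ... payload that must stay valid for in-flight RCU readers ... */
};

/* Runs on system_wq once a grace period has passed since queueing. */
static void foo_free_work(struct work_struct *work)
{
	struct foo *f = container_of(to_rcu_work(work), struct foo, rcu_work);

	kfree(f);
}

static void foo_init(struct foo *f)
{
	INIT_RCU_WORK(&f->rcu_work, foo_free_work);
}

static void foo_release(struct foo *f)
{
	/*
	 * One call replaces a call_rcu() callback whose only job was
	 * to schedule_work() the real destructor.
	 */
	queue_rcu_work(system_wq, &f->rcu_work);
}
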
 fs/super.c         | 16 +++++-----------
 include/linux/fs.h |  3 +--
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index 11b7e7213fd1..6b796bbc5ba3 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -156,8 +156,8 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 
 static void destroy_super_work(struct work_struct *work)
 {
-	struct super_block *s = container_of(work, struct super_block,
-							destroy_work);
+	struct super_block *s = container_of(to_rcu_work(work), struct super_block,
+							rcu_work);
 	int i;
 
 	for (i = 0; i < SB_FREEZE_LEVELS; i++)
@@ -165,12 +165,5 @@ static void destroy_super_work(struct work_struct *work)
 	kfree(s);
 }
 
-static void destroy_super_rcu(struct rcu_head *head)
-{
-	struct super_block *s = container_of(head, struct super_block, rcu);
-	INIT_WORK(&s->destroy_work, destroy_super_work);
-	schedule_work(&s->destroy_work);
-}
-
 /* Free a superblock that has never been seen by anyone */
 static void destroy_unused_super(struct super_block *s)
@@ -185,7 +178,7 @@ static void destroy_unused_super(struct super_block *s)
 	kfree(s->s_subtype);
 	free_prealloced_shrinker(&s->s_shrink);
 	/* no delays needed */
-	destroy_super_work(&s->destroy_work);
+	destroy_super_work(&s->rcu_work.work);
 }
 
 /**
@@ -249,6 +242,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	spin_lock_init(&s->s_inode_list_lock);
 	INIT_LIST_HEAD(&s->s_inodes_wb);
 	spin_lock_init(&s->s_inode_wblist_lock);
+	INIT_RCU_WORK(&s->rcu_work, destroy_super_work);
 
 	s->s_count = 1;
 	atomic_set(&s->s_active, 1);
@@ -296,7 +290,7 @@ static void __put_super(struct super_block *s)
 		fscrypt_sb_free(s);
 		put_user_ns(s->s_user_ns);
 		kfree(s->s_subtype);
-		call_rcu(&s->rcu, destroy_super_rcu);
+		queue_rcu_work(system_wq, &s->rcu_work);
 	}
 }
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c3c88fdb9b2a..2fe2b4d67af2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1534,8 +1534,7 @@ struct super_block {
 	 */
 	struct list_lru		s_dentry_lru;
 	struct list_lru		s_inode_lru;
-	struct rcu_head		rcu;
-	struct work_struct	destroy_work;
+	struct rcu_work		rcu_work;
 
 	struct mutex		s_sync_lock;	/* sync serialisation lock */
 
-- 
2.17.1

