* [PATCH v2 0/4] super: allow waiting without s_umount held
@ 2023-08-18 10:54 Christian Brauner
  2023-08-18 10:54 ` [PATCH v2 1/4] super: use locking helpers Christian Brauner
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 10:54 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Al Viro, linux-fsdevel, Christian Brauner

Hey everyone,

This is an attempt to allow concurrent mounters and iterators to wait
on superblock state changes without having to hold s_umount. This is
made necessary by the recent work to open block devices after superblock
creation and by the fixes for deadlocks caused by blkdev_put() trying to
acquire s_umount while s_umount is already held.
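
To make the lock ordering problem concrete, here is a userspace analogue
(a sketch only: pthread primitives stand in for the kernel locks and all
names are made up):

    #include <pthread.h>
    #include <stdio.h>

    /* s_umount analogue: sget{_fc}() returns with this held. */
    static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;
    /* open_mutex analogue: a block level lock ranking above s_umount. */
    static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *mounter(void *arg)
    {
            (void)arg;
            pthread_rwlock_wrlock(&s_umount);
            /*
             * Taking open_mutex here, with s_umount still held, would
             * invert the established open_mutex -> s_umount order and
             * could deadlock against put_bdev() below. So drop s_umount
             * before "opening the block device".
             */
            pthread_rwlock_unlock(&s_umount);
            pthread_mutex_lock(&open_mutex);
            pthread_mutex_unlock(&open_mutex);
            pthread_rwlock_wrlock(&s_umount);       /* reacquire */
            pthread_rwlock_unlock(&s_umount);
            return NULL;
    }

    static void *put_bdev(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&open_mutex);        /* blkdev_put() path */
            pthread_rwlock_rdlock(&s_umount);       /* e.g. via holder ops */
            pthread_rwlock_unlock(&s_umount);
            pthread_mutex_unlock(&open_mutex);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, mounter, NULL);
            pthread_create(&b, NULL, put_bdev, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            puts("no deadlock with the corrected ordering");
            return 0;
    }

Dropping s_umount is what opens the window that this series closes by
letting waiters sleep on flag bits instead of on the rwsem itself.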

This is on top of Jan's and Christoph's work in vfs.super. Obviously not
for v6.6. I hope I got it right, but this is intricate.

It reliably survives xfstests for btrfs, ext4, and xfs while 7
concurrent processes hammer on super_blocks via ustat() and a while-true
loop hammers on sget{_fc}() by trying to mount a filesystem with an
invalid superblock. Even with an arbitrary 5s delay inserted after
generic_shutdown_super() and blkdev_put(), things work fine.
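
For reference, the ustat() hammer can be approximated with a trivial
loop like the following (a sketch: SYS_ustat assumes an architecture
such as x86-64 that still wires the syscall up, and the device number is
arbitrary):

    #include <sys/syscall.h>
    #include <sys/sysmacros.h>
    #include <unistd.h>

    int main(void)
    {
            /* struct ustat is small; treat it as an opaque buffer. */
            char buf[64];
            unsigned long dev = makedev(8, 0);      /* e.g. /dev/sda */

            /* ustat() ends up in user_get_super() walking @super_blocks. */
            for (;;)
                    syscall(SYS_ustat, dev, buf);
    }

Run seven instances of this next to a mount loop that feeds sget{_fc}()
a filesystem image with an invalid superblock.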

Thanks and don't hit me over the head with things.
Christian

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Changes in v2:
- Rename various functions according to Jan's and Willy's suggestions.
- Remove smp_wmb() as smp_store_release() is enough.
- Remove hlist_unhashed() checks now that we wait on SB_DYING.
- Link to v1: https://lore.kernel.org/r/20230817-vfs-super-fixes-v3-v1-0-06ddeca7059b@kernel.org

---
Christian Brauner (4):
      super: use locking helpers
      super: make locking naming consistent
      super: wait for nascent superblocks
      super: wait until we passed kill super

 fs/fs-writeback.c  |   4 +-
 fs/internal.h      |   2 +-
 fs/super.c         | 404 ++++++++++++++++++++++++++++++++++++++++-------------
 include/linux/fs.h |   2 +
 4 files changed, 313 insertions(+), 99 deletions(-)
---
base-commit: f3aeab61fb15edef1e81828da8dbf0814541e49b
change-id: 20230816-vfs-super-fixes-v3-f2cff6192a50



* [PATCH v2 1/4] super: use locking helpers
  2023-08-18 10:54 [PATCH v2 0/4] super: allow waiting without s_umount held Christian Brauner
@ 2023-08-18 10:54 ` Christian Brauner
  2023-08-18 11:08   ` Jan Kara
  2023-08-18 10:54 ` [PATCH v2 2/4] super: make locking naming consistent Christian Brauner
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 10:54 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Al Viro, linux-fsdevel, Christian Brauner

Replace the open-coded {down,up}_{read,write}() calls with simple
wrappers. Follow-up patches will benefit from this as well.

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/super.c | 126 ++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 78 insertions(+), 48 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index c878e7373f93..b12e2f247e1e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -50,6 +50,42 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 	"sb_internal",
 };
 
+static inline void super_lock(struct super_block *sb, bool excl)
+{
+	if (excl)
+		down_write(&sb->s_umount);
+	else
+		down_read(&sb->s_umount);
+}
+
+static inline void super_unlock(struct super_block *sb, bool excl)
+{
+	if (excl)
+		up_write(&sb->s_umount);
+	else
+		up_read(&sb->s_umount);
+}
+
+static inline void super_lock_excl(struct super_block *sb)
+{
+	super_lock(sb, true);
+}
+
+static inline void super_lock_shared(struct super_block *sb)
+{
+	super_lock(sb, false);
+}
+
+static inline void super_unlock_excl(struct super_block *sb)
+{
+	super_unlock(sb, true);
+}
+
+static inline void super_unlock_shared(struct super_block *sb)
+{
+	super_unlock(sb, false);
+}
+
 /*
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
@@ -110,7 +146,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
-	up_read(&sb->s_umount);
+	super_unlock_shared(sb);
 	return freed;
 }
 
@@ -176,7 +212,7 @@ static void destroy_unused_super(struct super_block *s)
 {
 	if (!s)
 		return;
-	up_write(&s->s_umount);
+	super_unlock_excl(s);
 	list_lru_destroy(&s->s_dentry_lru);
 	list_lru_destroy(&s->s_inode_lru);
 	security_sb_free(s);
@@ -340,7 +376,7 @@ void deactivate_locked_super(struct super_block *s)
 		put_filesystem(fs);
 		put_super(s);
 	} else {
-		up_write(&s->s_umount);
+		super_unlock_excl(s);
 	}
 }
 
@@ -357,7 +393,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
 void deactivate_super(struct super_block *s)
 {
 	if (!atomic_add_unless(&s->s_active, -1, 1)) {
-		down_write(&s->s_umount);
+		super_lock_excl(s);
 		deactivate_locked_super(s);
 	}
 }
@@ -381,12 +417,12 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 {
 	s->s_count++;
 	spin_unlock(&sb_lock);
-	down_write(&s->s_umount);
+	super_lock_excl(s);
 	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
 		put_super(s);
 		return 1;
 	}
-	up_write(&s->s_umount);
+	super_unlock_excl(s);
 	put_super(s);
 	return 0;
 }
@@ -414,7 +450,7 @@ bool trylock_super(struct super_block *sb)
 		if (!hlist_unhashed(&sb->s_instances) &&
 		    sb->s_root && (sb->s_flags & SB_BORN))
 			return true;
-		up_read(&sb->s_umount);
+		super_unlock_shared(sb);
 	}
 
 	return false;
@@ -439,13 +475,13 @@ bool trylock_super(struct super_block *sb)
 void retire_super(struct super_block *sb)
 {
 	WARN_ON(!sb->s_bdev);
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 	if (sb->s_iflags & SB_I_PERSB_BDI) {
 		bdi_unregister(sb->s_bdi);
 		sb->s_iflags &= ~SB_I_PERSB_BDI;
 	}
 	sb->s_iflags |= SB_I_RETIRED;
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 }
 EXPORT_SYMBOL(retire_super);
 
@@ -521,7 +557,7 @@ void generic_shutdown_super(struct super_block *sb)
 	/* should be initialized for __put_super_and_need_restart() */
 	hlist_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 	if (sb->s_bdi != &noop_backing_dev_info) {
 		if (sb->s_iflags & SB_I_PERSB_BDI)
 			bdi_unregister(sb->s_bdi);
@@ -685,7 +721,7 @@ EXPORT_SYMBOL(sget);
 
 void drop_super(struct super_block *sb)
 {
-	up_read(&sb->s_umount);
+	super_unlock_shared(sb);
 	put_super(sb);
 }
 
@@ -693,7 +729,7 @@ EXPORT_SYMBOL(drop_super);
 
 void drop_super_exclusive(struct super_block *sb)
 {
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 	put_super(sb);
 }
 EXPORT_SYMBOL(drop_super_exclusive);
@@ -739,10 +775,10 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		down_read(&sb->s_umount);
+		super_lock_shared(sb);
 		if (sb->s_root && (sb->s_flags & SB_BORN))
 			f(sb, arg);
-		up_read(&sb->s_umount);
+		super_unlock_shared(sb);
 
 		spin_lock(&sb_lock);
 		if (p)
@@ -773,10 +809,10 @@ void iterate_supers_type(struct file_system_type *type,
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		down_read(&sb->s_umount);
+		super_lock_shared(sb);
 		if (sb->s_root && (sb->s_flags & SB_BORN))
 			f(sb, arg);
-		up_read(&sb->s_umount);
+		super_unlock_shared(sb);
 
 		spin_lock(&sb_lock);
 		if (p)
@@ -813,7 +849,7 @@ struct super_block *get_active_super(struct block_device *bdev)
 		if (sb->s_bdev == bdev) {
 			if (!grab_super(sb))
 				goto restart;
-			up_write(&sb->s_umount);
+			super_unlock_excl(sb);
 			return sb;
 		}
 	}
@@ -833,17 +869,11 @@ struct super_block *user_get_super(dev_t dev, bool excl)
 		if (sb->s_dev ==  dev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
-			if (excl)
-				down_write(&sb->s_umount);
-			else
-				down_read(&sb->s_umount);
+			super_lock(sb, excl);
 			/* still alive? */
 			if (sb->s_root && (sb->s_flags & SB_BORN))
 				return sb;
-			if (excl)
-				up_write(&sb->s_umount);
-			else
-				up_read(&sb->s_umount);
+			super_unlock(sb, excl);
 			/* nope, got unmounted */
 			spin_lock(&sb_lock);
 			__put_super(sb);
@@ -889,9 +919,9 @@ int reconfigure_super(struct fs_context *fc)
 
 	if (remount_ro) {
 		if (!hlist_empty(&sb->s_pins)) {
-			up_write(&sb->s_umount);
+			super_unlock_excl(sb);
 			group_pin_kill(&sb->s_pins);
-			down_write(&sb->s_umount);
+			super_lock_excl(sb);
 			if (!sb->s_root)
 				return 0;
 			if (sb->s_writers.frozen != SB_UNFROZEN)
@@ -954,7 +984,7 @@ int reconfigure_super(struct fs_context *fc)
 
 static void do_emergency_remount_callback(struct super_block *sb)
 {
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
 	    !sb_rdonly(sb)) {
 		struct fs_context *fc;
@@ -967,7 +997,7 @@ static void do_emergency_remount_callback(struct super_block *sb)
 			put_fs_context(fc);
 		}
 	}
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 }
 
 static void do_emergency_remount(struct work_struct *work)
@@ -990,12 +1020,12 @@ void emergency_remount(void)
 
 static void do_thaw_all_callback(struct super_block *sb)
 {
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 	if (sb->s_root && sb->s_flags & SB_BORN) {
 		emergency_thaw_bdev(sb);
 		thaw_super_locked(sb);
 	} else {
-		up_write(&sb->s_umount);
+		super_unlock_excl(sb);
 	}
 }
 
@@ -1182,10 +1212,10 @@ EXPORT_SYMBOL(get_tree_keyed);
  */
 static bool lock_active_super(struct super_block *sb)
 {
-	down_read(&sb->s_umount);
+	super_lock_shared(sb);
 	if (!sb->s_root ||
 	    (sb->s_flags & (SB_ACTIVE | SB_BORN)) != (SB_ACTIVE | SB_BORN)) {
-		up_read(&sb->s_umount);
+		super_unlock_shared(sb);
 		return false;
 	}
 	return true;
@@ -1208,7 +1238,7 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 	if (sb->s_op->shutdown)
 		sb->s_op->shutdown(sb);
 
-	up_read(&sb->s_umount);
+	super_unlock_shared(sb);
 }
 
 static void fs_bdev_sync(struct block_device *bdev)
@@ -1220,7 +1250,7 @@ static void fs_bdev_sync(struct block_device *bdev)
 	if (!lock_active_super(sb))
 		return;
 	sync_filesystem(sb);
-	up_read(&sb->s_umount);
+	super_unlock_shared(sb);
 }
 
 const struct blk_holder_ops fs_holder_ops = {
@@ -1342,9 +1372,9 @@ int get_tree_bdev(struct fs_context *fc,
 		 * bdev_mark_dead()). It is safe because we have active sb
 		 * reference and SB_BORN is not set yet.
 		 */
-		up_write(&s->s_umount);
+		super_unlock_excl(s);
 		error = setup_bdev_super(s, fc->sb_flags, fc);
-		down_write(&s->s_umount);
+		super_lock_excl(s);
 		if (!error)
 			error = fill_super(s, fc);
 		if (error) {
@@ -1394,9 +1424,9 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
 		 * bdev_mark_dead()). It is safe because we have active sb
 		 * reference and SB_BORN is not set yet.
 		 */
-		up_write(&s->s_umount);
+		super_unlock_excl(s);
 		error = setup_bdev_super(s, flags, NULL);
-		down_write(&s->s_umount);
+		super_lock_excl(s);
 		if (!error)
 			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
 		if (error) {
@@ -1685,29 +1715,29 @@ int freeze_super(struct super_block *sb)
 	int ret;
 
 	atomic_inc(&sb->s_active);
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 	if (sb->s_writers.frozen != SB_UNFROZEN) {
 		deactivate_locked_super(sb);
 		return -EBUSY;
 	}
 
 	if (!(sb->s_flags & SB_BORN)) {
-		up_write(&sb->s_umount);
+		super_unlock_excl(sb);
 		return 0;	/* sic - it's "nothing to do" */
 	}
 
 	if (sb_rdonly(sb)) {
 		/* Nothing to do really... */
 		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
-		up_write(&sb->s_umount);
+		super_unlock_excl(sb);
 		return 0;
 	}
 
 	sb->s_writers.frozen = SB_FREEZE_WRITE;
 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 	sb_wait_write(sb, SB_FREEZE_WRITE);
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 
 	/* Now we go and block page faults... */
 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
@@ -1743,7 +1773,7 @@ int freeze_super(struct super_block *sb)
 	 */
 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
 	lockdep_sb_freeze_release(sb);
-	up_write(&sb->s_umount);
+	super_unlock_excl(sb);
 	return 0;
 }
 EXPORT_SYMBOL(freeze_super);
@@ -1753,7 +1783,7 @@ static int thaw_super_locked(struct super_block *sb)
 	int error;
 
 	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
-		up_write(&sb->s_umount);
+		super_unlock_excl(sb);
 		return -EINVAL;
 	}
 
@@ -1770,7 +1800,7 @@ static int thaw_super_locked(struct super_block *sb)
 			printk(KERN_ERR
 				"VFS:Filesystem thaw failed\n");
 			lockdep_sb_freeze_release(sb);
-			up_write(&sb->s_umount);
+			super_unlock_excl(sb);
 			return error;
 		}
 	}
@@ -1790,7 +1820,7 @@ static int thaw_super_locked(struct super_block *sb)
  */
 int thaw_super(struct super_block *sb)
 {
-	down_write(&sb->s_umount);
+	super_lock_excl(sb);
 	return thaw_super_locked(sb);
 }
 EXPORT_SYMBOL(thaw_super);

-- 
2.34.1



* [PATCH v2 2/4] super: make locking naming consistent
  2023-08-18 10:54 [PATCH v2 0/4] super: allow waiting without s_umount held Christian Brauner
  2023-08-18 10:54 ` [PATCH v2 1/4] super: use locking helpers Christian Brauner
@ 2023-08-18 10:54 ` Christian Brauner
  2023-08-18 11:08   ` Jan Kara
  2023-08-18 10:54 ` [PATCH v2 3/4] super: wait for nascent superblocks Christian Brauner
  2023-08-18 10:54 ` [PATCH v2 4/4] super: wait until we passed kill super Christian Brauner
  3 siblings, 1 reply; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 10:54 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Al Viro, linux-fsdevel, Christian Brauner

Make the naming consistent with the super_lock_{shared,excl}() helpers
introduced earlier in this series.

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/fs-writeback.c |  4 ++--
 fs/internal.h     |  2 +-
 fs/super.c        | 28 ++++++++++++++--------------
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index aca4b4811394..969ce991b0b0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1953,9 +1953,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 		struct inode *inode = wb_inode(wb->b_io.prev);
 		struct super_block *sb = inode->i_sb;
 
-		if (!trylock_super(sb)) {
+		if (!super_trylock_shared(sb)) {
 			/*
-			 * trylock_super() may fail consistently due to
+			 * super_trylock_shared() may fail consistently due to
 			 * s_umount being grabbed by someone else. Don't use
 			 * requeue_io() to avoid busy retrying the inode/sb.
 			 */
diff --git a/fs/internal.h b/fs/internal.h
index b94290f61714..74d3b161dd2c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -115,7 +115,7 @@ static inline void put_file_access(struct file *file)
  * super.c
  */
 extern int reconfigure_super(struct fs_context *);
-extern bool trylock_super(struct super_block *sb);
+extern bool super_trylock_shared(struct super_block *sb);
 struct super_block *user_get_super(dev_t, bool excl);
 void put_super(struct super_block *sb);
 extern bool mount_capable(struct fs_context *);
diff --git a/fs/super.c b/fs/super.c
index b12e2f247e1e..ba5d813c5804 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -112,7 +112,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	if (!trylock_super(sb))
+	if (!super_trylock_shared(sb))
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
@@ -159,17 +159,17 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	sb = container_of(shrink, struct super_block, s_shrink);
 
 	/*
-	 * We don't call trylock_super() here as it is a scalability bottleneck,
-	 * so we're exposed to partial setup state. The shrinker rwsem does not
-	 * protect filesystem operations backing list_lru_shrink_count() or
-	 * s_op->nr_cached_objects(). Counts can change between
-	 * super_cache_count and super_cache_scan, so we really don't need locks
-	 * here.
+	 * We don't call super_trylock_shared() here as it is a scalability
+	 * bottleneck, so we're exposed to partial setup state. The shrinker
+	 * rwsem does not protect filesystem operations backing
+	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
+	 * change between super_cache_count and super_cache_scan, so we really
+	 * don't need locks here.
 	 *
 	 * However, if we are currently mounting the superblock, the underlying
 	 * filesystem might be in a state of partial construction and hence it
-	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
-	 * avoid this situation, so do the same here. The memory barrier is
+	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
+	 * to avoid this situation, so do the same here. The memory barrier is
 	 * matched with the one in mount_fs() as we don't hold locks here.
 	 */
 	if (!(sb->s_flags & SB_BORN))
@@ -428,7 +428,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *	trylock_super - try to grab ->s_umount shared
+ *	super_trylock_shared - try to grab ->s_umount shared
  *	@sb: reference we are trying to grab
  *
  *	Try to prevent fs shutdown.  This is used in places where we
@@ -444,7 +444,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
  *	of down_read().  There's a couple of places that are OK with that, but
  *	it's very much not a general-purpose interface.
  */
-bool trylock_super(struct super_block *sb)
+bool super_trylock_shared(struct super_block *sb)
 {
 	if (down_read_trylock(&sb->s_umount)) {
 		if (!hlist_unhashed(&sb->s_instances) &&
@@ -1210,7 +1210,7 @@ EXPORT_SYMBOL(get_tree_keyed);
  * and the place that clears the pointer to the superblock used by this function
  * before freeing the superblock.
  */
-static bool lock_active_super(struct super_block *sb)
+static bool super_lock_shared_active(struct super_block *sb)
 {
 	super_lock_shared(sb);
 	if (!sb->s_root ||
@@ -1228,7 +1228,7 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 	/* bd_holder_lock ensures that the sb isn't freed */
 	lockdep_assert_held(&bdev->bd_holder_lock);
 
-	if (!lock_active_super(sb))
+	if (!super_lock_shared_active(sb))
 		return;
 
 	if (!surprise)
@@ -1247,7 +1247,7 @@ static void fs_bdev_sync(struct block_device *bdev)
 
 	lockdep_assert_held(&bdev->bd_holder_lock);
 
-	if (!lock_active_super(sb))
+	if (!super_lock_shared_active(sb))
 		return;
 	sync_filesystem(sb);
 	super_unlock_shared(sb);

-- 
2.34.1



* [PATCH v2 3/4] super: wait for nascent superblocks
  2023-08-18 10:54 [PATCH v2 0/4] super: allow waiting without s_umount held Christian Brauner
  2023-08-18 10:54 ` [PATCH v2 1/4] super: use locking helpers Christian Brauner
  2023-08-18 10:54 ` [PATCH v2 2/4] super: make locking naming consistent Christian Brauner
@ 2023-08-18 10:54 ` Christian Brauner
  2023-08-18 12:02   ` Jan Kara
  2023-08-18 10:54 ` [PATCH v2 4/4] super: wait until we passed kill super Christian Brauner
  3 siblings, 1 reply; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 10:54 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Al Viro, linux-fsdevel, Christian Brauner

Recent patches experiment with making it possible to allocate a new
superblock before opening the relevant block device. Naturally this has
intricate side-effects that we get to learn about while developing this.

Superblock allocators such as sget{_fc}() return with s_umount of the
new superblock held and lock ordering currently requires that block
level locks such as bdev_lock and open_mutex rank above s_umount.

Before aca740cecbe5 ("fs: open block device after superblock creation")
ordering was guaranteed to be correct as block devices were opened prior
to superblock allocation and thus s_umount wasn't held. But now s_umount
must be dropped before opening block devices to avoid locking
violations.

This has consequences. The main one being that iterators over
@super_blocks and @fs_supers that grab a temporary reference to the
superblock can now also grab s_umount before the caller has managed to
open block devices and called fill_super(). So whereas before such
iterators or concurrent mounts would have simply slept on s_umount until
SB_BORN was set or the superblock was discarded due to initialization
failure, they can now needlessly spin through sget{_fc}().

If the caller is sleeping on bdev_lock or open_mutex, one caller waiting
on SB_BORN will always spin somewhere and potentially this can go on for
quite a while.

It should be possible to drop s_umount while allowing iterators to wait
on a nascent superblock to either be born or discarded. This patch
implements a wait_var_event() mechanism allowing iterators to sleep
until they are woken when the superblock is born or discarded.

This also allows us to avoid relooping through @fs_supers and
@super_blocks if a superblock isn't yet born or dying.
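
For readers unfamiliar with the pattern, wait_var_event() and
wake_up_var() pair up following the usual kernel idiom (a generic sketch
with a made-up struct, not code from this patch):

    #include <linux/wait_bit.h>

    #define FOO_READY 0x1

    struct foo {
            unsigned int flags;
    };

    static bool foo_ready(struct foo *obj)
    {
            /* Pairs with smp_store_release() in foo_mark_ready(). */
            return smp_load_acquire(&obj->flags) & FOO_READY;
    }

    static void foo_wait_ready(struct foo *obj)
    {
            /* Sleep until the condition holds; recheck after wakeups. */
            wait_var_event(&obj->flags, foo_ready(obj));
    }

    static void foo_mark_ready(struct foo *obj)
    {
            /* Publish the flag before waking any waiter. */
            smp_store_release(&obj->flags, obj->flags | FOO_READY);
            wake_up_var(&obj->flags);
    }

The patch below applies exactly this to sb->s_flags, with SB_BORN and
SB_DYING as the conditions waited for.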

Link: aca740cecbe5 ("fs: open block device after superblock creation")
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/super.c         | 198 +++++++++++++++++++++++++++++++++++++++--------------
 include/linux/fs.h |   1 +
 2 files changed, 148 insertions(+), 51 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index ba5d813c5804..52e7b4153035 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -50,7 +50,7 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 	"sb_internal",
 };
 
-static inline void super_lock(struct super_block *sb, bool excl)
+static inline void __super_lock(struct super_block *sb, bool excl)
 {
 	if (excl)
 		down_write(&sb->s_umount);
@@ -66,14 +66,9 @@ static inline void super_unlock(struct super_block *sb, bool excl)
 		up_read(&sb->s_umount);
 }
 
-static inline void super_lock_excl(struct super_block *sb)
+static inline void __super_lock_excl(struct super_block *sb)
 {
-	super_lock(sb, true);
-}
-
-static inline void super_lock_shared(struct super_block *sb)
-{
-	super_lock(sb, false);
+	__super_lock(sb, true);
 }
 
 static inline void super_unlock_excl(struct super_block *sb)
@@ -86,6 +81,94 @@ static inline void super_unlock_shared(struct super_block *sb)
 	super_unlock(sb, false);
 }
 
+static inline bool wait_born(struct super_block *sb)
+{
+	unsigned int flags;
+
+	/*
+	 * Pairs with smp_store_release() in super_wake() and ensures
+	 * that we see SB_BORN or SB_DYING after we're woken.
+	 */
+	flags = smp_load_acquire(&sb->s_flags);
+	return flags & (SB_BORN | SB_DYING);
+}
+
+/**
+ * super_lock - wait for superblock to become ready
+ * @sb: superblock to wait for
+ * @excl: whether exclusive access is required
+ *
+ * If the superblock has neither passed through vfs_get_tree() nor
+ * generic_shutdown_super() yet, wait for that to happen. Either superblock
+ * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
+ * woken and we'll see SB_DYING.
+ *
+ * The caller must have acquired a temporary reference on @sb->s_count.
+ *
+ * Return: This returns true if SB_BORN was set, false if SB_DYING was
+ *         set. The function acquires s_umount and returns with it held.
+ */
+static bool super_lock(struct super_block *sb, bool excl)
+{
+
+	lockdep_assert_not_held(&sb->s_umount);
+
+relock:
+	__super_lock(sb, excl);
+
+	/*
+	 * Has gone through generic_shutdown_super() in the meantime.
+	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
+	 * grab a reference to this. Tell them so.
+	 */
+	if (sb->s_flags & SB_DYING)
+		return false;
+
+	/* Has called ->get_tree() successfully. */
+	if (sb->s_flags & SB_BORN)
+		return true;
+
+	super_unlock(sb, excl);
+
+	/* wait until the superblock is ready or dying */
+	wait_var_event(&sb->s_flags, wait_born(sb));
+
+	/*
+	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
+	 * Just reacquire @sb->s_umount for the caller.
+	 */
+	goto relock;
+}
+
+/* wait and acquire read-side of @sb->s_umount */
+static inline bool super_lock_shared(struct super_block *sb)
+{
+	return super_lock(sb, false);
+}
+
+/* wait and acquire write-side of @sb->s_umount */
+static inline bool super_lock_excl(struct super_block *sb)
+{
+	return super_lock(sb, true);
+}
+
+/* wake waiters */
+#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
+static void super_wake(struct super_block *sb, unsigned int flag)
+{
+	unsigned int flags = sb->s_flags;
+
+	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
+	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
+
+	/*
+	 * Pairs with smp_load_acquire() in super_lock() and
+	 * ensures that @flag is set before we wake anyone.
+	 */
+	smp_store_release(&sb->s_flags, flags | flag);
+	wake_up_var(&sb->s_flags);
+}
+
 /*
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
@@ -393,7 +476,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
 void deactivate_super(struct super_block *s)
 {
 	if (!atomic_add_unless(&s->s_active, -1, 1)) {
-		super_lock_excl(s);
+		__super_lock_excl(s);
 		deactivate_locked_super(s);
 	}
 }
@@ -415,10 +498,12 @@ EXPORT_SYMBOL(deactivate_super);
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
+	bool born;
+
 	s->s_count++;
 	spin_unlock(&sb_lock);
-	super_lock_excl(s);
-	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
+	born = super_lock_excl(s);
+	if (born && atomic_inc_not_zero(&s->s_active)) {
 		put_super(s);
 		return 1;
 	}
@@ -447,8 +532,8 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 bool super_trylock_shared(struct super_block *sb)
 {
 	if (down_read_trylock(&sb->s_umount)) {
-		if (!hlist_unhashed(&sb->s_instances) &&
-		    sb->s_root && (sb->s_flags & SB_BORN))
+		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
+		    (sb->s_flags & SB_BORN))
 			return true;
 		super_unlock_shared(sb);
 	}
@@ -475,7 +560,7 @@ bool super_trylock_shared(struct super_block *sb)
 void retire_super(struct super_block *sb)
 {
 	WARN_ON(!sb->s_bdev);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	if (sb->s_iflags & SB_I_PERSB_BDI) {
 		bdi_unregister(sb->s_bdi);
 		sb->s_iflags &= ~SB_I_PERSB_BDI;
@@ -557,6 +642,13 @@ void generic_shutdown_super(struct super_block *sb)
 	/* should be initialized for __put_super_and_need_restart() */
 	hlist_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
+	/*
+	 * Broadcast to everyone that grabbed a temporary reference to this
+	 * superblock before we removed it from @fs_supers that the superblock
+	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
+	 * discard this superblock and treat it as dead.
+	 */
+	super_wake(sb, SB_DYING);
 	super_unlock_excl(sb);
 	if (sb->s_bdi != &noop_backing_dev_info) {
 		if (sb->s_iflags & SB_I_PERSB_BDI)
@@ -631,6 +723,11 @@ struct super_block *sget_fc(struct fs_context *fc,
 	s->s_type = fc->fs_type;
 	s->s_iflags |= fc->s_iflags;
 	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
+	/*
+	 * Make the superblock visible on @super_blocks and @fs_supers.
+	 * It's in a nascent state and users should wait on SB_BORN or
+	 * SB_DYING to be set.
+	 */
 	list_add_tail(&s->s_list, &super_blocks);
 	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
 	spin_unlock(&sb_lock);
@@ -740,7 +837,8 @@ static void __iterate_supers(void (*f)(struct super_block *))
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
+		/* Pairs with smp_store_release() in super_wake(). */
+		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
@@ -770,13 +868,13 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
+		bool born;
+
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		super_lock_shared(sb);
-		if (sb->s_root && (sb->s_flags & SB_BORN))
+		born = super_lock_shared(sb);
+		if (born && sb->s_root)
 			f(sb, arg);
 		super_unlock_shared(sb);
 
@@ -806,11 +904,13 @@ void iterate_supers_type(struct file_system_type *type,
 
 	spin_lock(&sb_lock);
 	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
+		bool born;
+
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		super_lock_shared(sb);
-		if (sb->s_root && (sb->s_flags & SB_BORN))
+		born = super_lock_shared(sb);
+		if (born && sb->s_root)
 			f(sb, arg);
 		super_unlock_shared(sb);
 
@@ -841,14 +941,11 @@ struct super_block *get_active_super(struct block_device *bdev)
 	if (!bdev)
 		return NULL;
 
-restart:
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
 		if (sb->s_bdev == bdev) {
 			if (!grab_super(sb))
-				goto restart;
+				return NULL;
 			super_unlock_excl(sb);
 			return sb;
 		}
@@ -862,22 +959,21 @@ struct super_block *user_get_super(dev_t dev, bool excl)
 	struct super_block *sb;
 
 	spin_lock(&sb_lock);
-rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
 		if (sb->s_dev ==  dev) {
+			bool born;
+
 			sb->s_count++;
 			spin_unlock(&sb_lock);
-			super_lock(sb, excl);
 			/* still alive? */
-			if (sb->s_root && (sb->s_flags & SB_BORN))
+			born = super_lock(sb, excl);
+			if (born && sb->s_root)
 				return sb;
 			super_unlock(sb, excl);
 			/* nope, got unmounted */
 			spin_lock(&sb_lock);
 			__put_super(sb);
-			goto rescan;
+			break;
 		}
 	}
 	spin_unlock(&sb_lock);
@@ -921,7 +1017,7 @@ int reconfigure_super(struct fs_context *fc)
 		if (!hlist_empty(&sb->s_pins)) {
 			super_unlock_excl(sb);
 			group_pin_kill(&sb->s_pins);
-			super_lock_excl(sb);
+			__super_lock_excl(sb);
 			if (!sb->s_root)
 				return 0;
 			if (sb->s_writers.frozen != SB_UNFROZEN)
@@ -984,9 +1080,9 @@ int reconfigure_super(struct fs_context *fc)
 
 static void do_emergency_remount_callback(struct super_block *sb)
 {
-	super_lock_excl(sb);
-	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
-	    !sb_rdonly(sb)) {
+	bool born = super_lock_excl(sb);
+
+	if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
 		struct fs_context *fc;
 
 		fc = fs_context_for_reconfigure(sb->s_root,
@@ -1020,8 +1116,9 @@ void emergency_remount(void)
 
 static void do_thaw_all_callback(struct super_block *sb)
 {
-	super_lock_excl(sb);
-	if (sb->s_root && sb->s_flags & SB_BORN) {
+	bool born = super_lock_excl(sb);
+
+	if (born && sb->s_root) {
 		emergency_thaw_bdev(sb);
 		thaw_super_locked(sb);
 	} else {
@@ -1212,9 +1309,9 @@ EXPORT_SYMBOL(get_tree_keyed);
  */
 static bool super_lock_shared_active(struct super_block *sb)
 {
-	super_lock_shared(sb);
-	if (!sb->s_root ||
-	    (sb->s_flags & (SB_ACTIVE | SB_BORN)) != (SB_ACTIVE | SB_BORN)) {
+	bool born = super_lock_shared(sb);
+
+	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
 		super_unlock_shared(sb);
 		return false;
 	}
@@ -1374,7 +1471,7 @@ int get_tree_bdev(struct fs_context *fc,
 		 */
 		super_unlock_excl(s);
 		error = setup_bdev_super(s, fc->sb_flags, fc);
-		super_lock_excl(s);
+		__super_lock_excl(s);
 		if (!error)
 			error = fill_super(s, fc);
 		if (error) {
@@ -1426,7 +1523,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
 		 */
 		super_unlock_excl(s);
 		error = setup_bdev_super(s, flags, NULL);
-		super_lock_excl(s);
+		__super_lock_excl(s);
 		if (!error)
 			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
 		if (error) {
@@ -1566,13 +1663,12 @@ int vfs_get_tree(struct fs_context *fc)
 	WARN_ON(!sb->s_bdi);
 
 	/*
-	 * Write barrier is for super_cache_count(). We place it before setting
-	 * SB_BORN as the data dependency between the two functions is the
-	 * superblock structure contents that we just set up, not the SB_BORN
-	 * flag.
+	 * super_wake() contains a smp_store_release() which also takes care of
+	 * ordering for super_cache_count(). We place it before setting SB_BORN
+	 * as the data dependency between the two functions is the superblock
+	 * structure contents that we just set up, not the SB_BORN flag.
 	 */
-	smp_wmb();
-	sb->s_flags |= SB_BORN;
+	super_wake(sb, SB_BORN);
 
 	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
 	if (unlikely(error)) {
@@ -1715,7 +1811,7 @@ int freeze_super(struct super_block *sb)
 	int ret;
 
 	atomic_inc(&sb->s_active);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	if (sb->s_writers.frozen != SB_UNFROZEN) {
 		deactivate_locked_super(sb);
 		return -EBUSY;
@@ -1737,7 +1833,7 @@ int freeze_super(struct super_block *sb)
 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
 	super_unlock_excl(sb);
 	sb_wait_write(sb, SB_FREEZE_WRITE);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 
 	/* Now we go and block page faults... */
 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
@@ -1820,7 +1916,7 @@ static int thaw_super_locked(struct super_block *sb)
  */
 int thaw_super(struct super_block *sb)
 {
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	return thaw_super_locked(sb);
 }
 EXPORT_SYMBOL(thaw_super);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14b5777a24a0..173672645156 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1095,6 +1095,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_LAZYTIME     BIT(25)	/* Update the on-disk [acm]times lazily */
 
 /* These sb flags are internal to the kernel */
+#define SB_DYING        BIT(24)
 #define SB_SUBMOUNT     BIT(26)
 #define SB_FORCE        BIT(27)
 #define SB_NOSEC        BIT(28)

-- 
2.34.1



* [PATCH v2 4/4] super: wait until we passed kill super
  2023-08-18 10:54 [PATCH v2 0/4] super: allow waiting without s_umount held Christian Brauner
                   ` (2 preceding siblings ...)
  2023-08-18 10:54 ` [PATCH v2 3/4] super: wait for nascent superblocks Christian Brauner
@ 2023-08-18 10:54 ` Christian Brauner
  2023-08-18 12:26   ` Jan Kara
  2023-08-25  1:48   ` kernel test robot
  3 siblings, 2 replies; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 10:54 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Al Viro, linux-fsdevel, Christian Brauner

Recent rework moved block device closing out of sb->put_super() and into
sb->kill_sb() to avoid deadlocks as s_umount is held in put_super() and
blkdev_put() can end up taking s_umount again.

That means we need to move the removal of the superblock from @fs_supers
out of generic_shutdown_super() and into deactivate_locked_super() to
ensure that concurrent mounters don't fail to open block devices that
are still in use because blkdev_put() in sb->kill_sb() hasn't been
called yet.

We can now do this as we can make iterators through @fs_supers and
@super_blocks wait without holding s_umount. Concurrent mounts will wait
until a dying superblock is fully dead, i.e. until sb->kill_sb() has been
called and SB_DEAD has been set. Concurrent iterators can already discard
any SB_DYING superblock.
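
A rough timeline of the resulting protocol, as a concurrent sget{_fc}()
caller would observe it (a sketch, not taken verbatim from the patch):

    /*
     *  concurrent sget{_fc}()          umount side
     *  ----------------------          -----------
     *  finds sb on @fs_supers
     *  grab_super_dead():
     *    waits for SB_BORN/SB_DYING
     *                                  generic_shutdown_super():
     *                                    sets SB_DYING, wakes waiters,
     *                                    sb stays on @fs_supers
     *    sees SB_DYING, now waits
     *    for SB_DEAD
     *                                  sb->kill_sb() -> blkdev_put()
     *                                  deactivate_locked_super():
     *                                    removes sb from @fs_supers
     *                                    super_wake(sb, SB_DEAD)
     *    woken, retries the lookup,
     *    no longer finds the sb and
     *    allocates a fresh one
     */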

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/super.c         | 100 +++++++++++++++++++++++++++++++++++++++++++++++++----
 include/linux/fs.h |   1 +
 2 files changed, 94 insertions(+), 7 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index 52e7b4153035..045d8611c44b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -93,6 +93,18 @@ static inline bool wait_born(struct super_block *sb)
 	return flags & (SB_BORN | SB_DYING);
 }
 
+static inline bool wait_dead(struct super_block *sb)
+{
+	unsigned int flags;
+
+	/*
+	 * Pairs with smp_store_release() in super_wake() and ensures
+	 * that we see SB_DEAD after we're woken.
+	 */
+	flags = smp_load_acquire(&sb->s_flags);
+	return flags & SB_DEAD;
+}
+
 /**
  * super_lock - wait for superblock to become ready
  * @sb: superblock to wait for
@@ -140,6 +152,33 @@ static bool super_lock(struct super_block *sb, bool excl)
 	goto relock;
 }
 
+/**
+ * super_lock_dead - wait for superblock to become ready or fully dead
+ * @sb: superblock to wait for
+ *
+ * Wait for a superblock to be SB_BORN or to be SB_DEAD. In other words,
+ * don't just wait for the superblock to be shutdown in
+ * generic_shutdown_super() but actually wait until sb->kill_sb() has
+ * finished.
+ *
+ * The caller must have acquired a temporary reference on @sb->s_count.
+ *
+ * Return: This returns true if SB_BORN was set, false if SB_DEAD was
+ *         set. The function acquires s_umount and returns with it held.
+ */
+static bool super_lock_dead(struct super_block *sb)
+{
+	if (super_lock(sb, true))
+		return true;
+
+	lockdep_assert_held(&sb->s_umount);
+	super_unlock_excl(sb);
+	/* If superblock is dying, wait for everything to be shutdown. */
+	wait_var_event(&sb->s_flags, wait_dead(sb));
+	__super_lock_excl(sb);
+	return false;
+}
+
 /* wait and acquire read-side of @sb->s_umount */
 static inline bool super_lock_shared(struct super_block *sb)
 {
@@ -153,7 +192,7 @@ static inline bool super_lock_excl(struct super_block *sb)
 }
 
 /* wake waiters */
-#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
+#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
 static void super_wake(struct super_block *sb, unsigned int flag)
 {
 	unsigned int flags = sb->s_flags;
@@ -169,6 +208,35 @@ static void super_wake(struct super_block *sb, unsigned int flag)
 	wake_up_var(&sb->s_flags);
 }
 
+/**
+ * grab_super_dead - acquire an active reference to a superblock
+ * @sb: superblock to acquire
+ *
+ * Acquire a temporary reference on a superblock and try to trade it for
+ * an active reference. This is used in sget{_fc}() to wait for a
+ * superblock to either become SB_BORN or for it to pass through
+ * sb->kill_sb() and be marked as SB_DEAD.
+ *
+ * Return: This returns true if an active reference could be acquired,
+ *         false if not. The function acquires s_umount and returns with
+ *         it held.
+ */
+static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
+{
+	bool born;
+
+	s->s_count++;
+	spin_unlock(&sb_lock);
+	born = super_lock_dead(s);
+	if (born && atomic_inc_not_zero(&s->s_active)) {
+		put_super(s);
+		return true;
+	}
+	up_write(&s->s_umount);
+	put_super(s);
+	return false;
+}
+
 /*
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
@@ -456,6 +524,25 @@ void deactivate_locked_super(struct super_block *s)
 		list_lru_destroy(&s->s_dentry_lru);
 		list_lru_destroy(&s->s_inode_lru);
 
+		/*
+		 * Remove it from @fs_supers so it isn't found by new
+		 * sget{_fc}() walkers anymore. Any concurrent mounter still
+		 * managing to grab a temporary reference is guaranteed to
+		 * already see SB_DYING and will wait until we notify them about
+		 * SB_DEAD.
+		 */
+		spin_lock(&sb_lock);
+		hlist_del_init(&s->s_instances);
+		spin_unlock(&sb_lock);
+
+		/*
+		 * Let concurrent mounts know that this thing is really dead.
+		 * We don't need @sb->s_umount here as every concurrent caller
+		 * will see SB_DYING and either discard the superblock or wait
+		 * for SB_DEAD.
+		 */
+		super_wake(s, SB_DEAD);
+
 		put_filesystem(fs);
 		put_super(s);
 	} else {
@@ -638,15 +725,14 @@ void generic_shutdown_super(struct super_block *sb)
 			spin_unlock(&sb->s_inode_list_lock);
 		}
 	}
-	spin_lock(&sb_lock);
-	/* should be initialized for __put_super_and_need_restart() */
-	hlist_del_init(&sb->s_instances);
-	spin_unlock(&sb_lock);
 	/*
 	 * Broadcast to everyone that grabbed a temporary reference to this
 	 * superblock before we removed it from @fs_supers that the superblock
 	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
 	 * discard this superblock and treat it as dead.
+	 *
+	 * We leave the superblock on @fs_supers so it can be found by
+	 * sget{_fc}() until we passed sb->kill_sb().
 	 */
 	super_wake(sb, SB_DYING);
 	super_unlock_excl(sb);
@@ -741,7 +827,7 @@ struct super_block *sget_fc(struct fs_context *fc,
 		destroy_unused_super(s);
 		return ERR_PTR(-EBUSY);
 	}
-	if (!grab_super(old))
+	if (!grab_super_dead(old))
 		goto retry;
 	destroy_unused_super(s);
 	return old;
@@ -785,7 +871,7 @@ struct super_block *sget(struct file_system_type *type,
 				destroy_unused_super(s);
 				return ERR_PTR(-EBUSY);
 			}
-			if (!grab_super(old))
+			if (!grab_super_dead(old))
 				goto retry;
 			destroy_unused_super(s);
 			return old;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 173672645156..a63da68305e9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1095,6 +1095,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_LAZYTIME     BIT(25)	/* Update the on-disk [acm]times lazily */
 
 /* These sb flags are internal to the kernel */
+#define SB_DEAD         BIT(21)
 #define SB_DYING        BIT(24)
 #define SB_SUBMOUNT     BIT(26)
 #define SB_FORCE        BIT(27)

-- 
2.34.1



* Re: [PATCH v2 1/4] super: use locking helpers
  2023-08-18 10:54 ` [PATCH v2 1/4] super: use locking helpers Christian Brauner
@ 2023-08-18 11:08   ` Jan Kara
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Kara @ 2023-08-18 11:08 UTC (permalink / raw)
  To: Christian Brauner; +Cc: Jan Kara, Christoph Hellwig, Al Viro, linux-fsdevel

On Fri 18-08-23 12:54:15, Christian Brauner wrote:
> Replace the open-coded {down,up}_{read,write}() calls with simple
> wrappers. Follow-up patches will benefit from this as well.
> 
> Signed-off-by: Christian Brauner <brauner@kernel.org>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR


* Re: [PATCH v2 2/4] super: make locking naming consistent
  2023-08-18 10:54 ` [PATCH v2 2/4] super: make locking naming consistent Christian Brauner
@ 2023-08-18 11:08   ` Jan Kara
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Kara @ 2023-08-18 11:08 UTC (permalink / raw)
  To: Christian Brauner; +Cc: Jan Kara, Christoph Hellwig, Al Viro, linux-fsdevel

On Fri 18-08-23 12:54:16, Christian Brauner wrote:
> Make the naming consistent with the super_lock_{shared,excl}() helpers
> introduced earlier in this series.
> 
> Signed-off-by: Christian Brauner <brauner@kernel.org>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] super: wait for nascent superblocks
  2023-08-18 10:54 ` [PATCH v2 3/4] super: wait for nascent superblocks Christian Brauner
@ 2023-08-18 12:02   ` Jan Kara
  2023-08-18 12:46     ` Christian Brauner
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Kara @ 2023-08-18 12:02 UTC (permalink / raw)
  To: Christian Brauner; +Cc: Jan Kara, Christoph Hellwig, Al Viro, linux-fsdevel

On Fri 18-08-23 12:54:17, Christian Brauner wrote:
> Recent patches experiment with making it possible to allocate a new
> superblock before opening the relevant block device. Naturally this has
> intricate side-effects that we get to learn about while developing this.
> 
> Superblock allocators such as sget{_fc}() return with s_umount of the
> new superblock held and lock ordering currently requires that block
> level locks such as bdev_lock and open_mutex rank above s_umount.
> 
> Before aca740cecbe5 ("fs: open block device after superblock creation")
> ordering was guaranteed to be correct as block devices were opened prior
> to superblock allocation and thus s_umount wasn't held. But now s_umount
> must be dropped before opening block devices to avoid locking
> violations.
> 
> This has consequences. The main one being that iterators over
> @super_blocks and @fs_supers that grab a temporary reference to the
> superblock can now also grab s_umount before the caller has managed to
> open block devices and called fill_super(). So whereas before such
> iterators or concurrent mounts would have simply slept on s_umount until
> SB_BORN was set or the superblock was discarded due to initialization
> failure, they can now needlessly spin through sget{_fc}().
> 
> If the caller is sleeping on bdev_lock or open_mutex, any caller waiting
> on SB_BORN will keep spinning somewhere, and potentially this can go on
> for quite a while.
> 
> It should be possible to drop s_umount while allowing iterators to wait
> on a nascent superblock to either be born or discarded. This patch
> implements a wait_var_event() mechanism allowing iterators to sleep
> until they are woken when the superblock is born or discarded.
> 
> This also allows us to avoid relooping through @fs_supers and
> @super_blocks if a superblock isn't yet born or dying.
> 
> Link: aca740cecbe5 ("fs: open block device after superblock creation")
> Signed-off-by: Christian Brauner <brauner@kernel.org>

Looks mostly good to me. I've spotted only a couple of nits and one
possible memory ordering issue...

> @@ -86,6 +81,94 @@ static inline void super_unlock_shared(struct super_block *sb)
>  	super_unlock(sb, false);
>  }
>  
> +static inline bool wait_born(struct super_block *sb)
> +{
> +	unsigned int flags;
> +
> +	/*
> +	 * Pairs with smp_store_release() in super_wake() and ensures
> +	 * that we see SB_BORN or SB_DYING after we're woken.
> +	 */
> +	flags = smp_load_acquire(&sb->s_flags);
> +	return flags & (SB_BORN | SB_DYING);
> +}
> +
> +/**
> + * super_lock - wait for superblock to become ready

Perhaps expand this a bit to "wait for superblock to become ready and
lock it"

> + * @sb: superblock to wait for
> + * @excl: whether exclusive access is required
> + *
> + * If the superblock has neither passed through vfs_get_tree() nor
> + * generic_shutdown_super() yet wait for it to happen. Either superblock
> + * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
> + * woken and we'll see SB_DYING.
> + *
> + * The caller must have acquired a temporary reference on @sb->s_count.
> + *
> + * Return: This returns true if SB_BORN was set, false if SB_DYING was
> + *         set. The function acquires s_umount and returns with it held.
> + */
> +static bool super_lock(struct super_block *sb, bool excl)

Perhaps we can make the function __must_check? Because if you don't care
about the result you should be using __super_lock().
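
Something like this (an untested sketch, just adding the annotation):

	static __must_check bool super_lock(struct super_block *sb, bool excl)

so a caller that ignores the SB_BORN/SB_DYING result gets flagged at
compile time.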

> +{
> +
> +	lockdep_assert_not_held(&sb->s_umount);
> +
> +relock:
> +	__super_lock(sb, excl);
> +
> +	/*
> +	 * Has gone through generic_shutdown_super() in the meantime.
> +	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
> +	 * grab a reference to this. Tell them so.
> +	 */
> +	if (sb->s_flags & SB_DYING)
> +		return false;
> +
> +	/* Has called ->get_tree() successfully. */
> +	if (sb->s_flags & SB_BORN)
> +		return true;
> +
> +	super_unlock(sb, excl);
> +
> +	/* wait until the superblock is ready or dying */
> +	wait_var_event(&sb->s_flags, wait_born(sb));
> +
> +	/*
> +	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
> +	 * Just reacquire @sb->s_umount for the caller.
> +	 */
> +	goto relock;
> +}
> +
> +/* wait and acquire read-side of @sb->s_umount */
> +static inline bool super_lock_shared(struct super_block *sb)
> +{
> +	return super_lock(sb, false);
> +}
> +
> +/* wait and acquire write-side of @sb->s_umount */
> +static inline bool super_lock_excl(struct super_block *sb)
> +{
> +	return super_lock(sb, true);
> +}
> +
> +/* wake waiters */
> +#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
> +static void super_wake(struct super_block *sb, unsigned int flag)
> +{
> +	unsigned int flags = sb->s_flags;
> +
> +	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
> +	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

Maybe assert here that s_umount is held?
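
I.e. just a sketch:

	lockdep_assert_held(&sb->s_umount);

to catch a super_wake() caller that doesn't hold the lock.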

> +
> +	/*
> +	 * Pairs with smp_load_acquire() in super_lock() and
> +	 * ensures that @flag is set before we wake anyone.
> +	 */
> +	smp_store_release(&sb->s_flags, flags | flag);
> +	wake_up_var(&sb->s_flags);

As I'm thinking about it now, we may need at least a smp_rmb() between the
store and wake_up_var(). What I'm worried about is the following:

TASK1					TASK2
super_wake()				super_lock()
					  check s_flags, SB_BORN not set yet
  waitqueue_active() from wake_up_var()
    which got reordered by the CPU before
    smp_store_release(). This seems possible
    because a release is one-way permeable in
    this direction.
					  wait_var_event(..)
					    prepare_to_wait_event()
					    wait_born()
					      SB_BORN still not set => sleep
  smp_store_release() sets SB_BORN
  wake_up_var() does nothing because it thinks
    the waitqueue is empty.

								Honza
-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 4/4] super: wait until we passed kill super
  2023-08-18 10:54 ` [PATCH v2 4/4] super: wait until we passed kill super Christian Brauner
@ 2023-08-18 12:26   ` Jan Kara
  2023-08-18 12:48     ` Christian Brauner
  2023-08-25  1:48   ` kernel test robot
  1 sibling, 1 reply; 13+ messages in thread
From: Jan Kara @ 2023-08-18 12:26 UTC (permalink / raw)
  To: Christian Brauner; +Cc: Jan Kara, Christoph Hellwig, Al Viro, linux-fsdevel

On Fri 18-08-23 12:54:18, Christian Brauner wrote:
> Recent rework moved block device closing out of sb->put_super() and into
> sb->kill_sb() to avoid deadlocks as s_umount is held in put_super() and
> blkdev_put() can end up taking s_umount again.
> 
> That means we need to move the removal of the superblock from @fs_supers
> out of generic_shutdown_super() and into deactivate_locked_super() to
> ensure that concurrent mounters don't fail to open block devices that
> are still in use because blkdev_put() in sb->kill_sb() hasn't been
> called yet.
> 
> We can now do this as we can make iterators through @fs_supers and
> @super_blocks wait without holding s_umount. Concurrent mounts will wait
> until a dying superblock is fully dead, i.e. until sb->kill_sb() has been
> called and SB_DEAD has been set. Concurrent iterators can already discard
> any SB_DYING superblock.
> 
> Signed-off-by: Christian Brauner <brauner@kernel.org>

One nit below:

> +static inline bool wait_dead(struct super_block *sb)
> +{
> +	unsigned int flags;
> +
> +	/*
> +	 * Pairs with smp_store_release() in super_wake() and ensures
> +	 * that we see SB_DEAD after we're woken.
> +	 */
> +	flags = smp_load_acquire(&sb->s_flags);
> +	return flags & SB_DEAD;
> +}
> +
>  /**
>   * super_lock - wait for superblock to become ready
>   * @sb: superblock to wait for
> @@ -140,6 +152,33 @@ static bool super_lock(struct super_block *sb, bool excl)
>  	goto relock;
>  }
>  
> +/**
> + * super_lock_dead - wait for superblock to become ready or fully dead
> + * @sb: superblock to wait for
> + *
> + * Wait for a superblock to be SB_BORN or to be SB_DEAD. In other words,
> + * don't just wait for the superblock to be shutdown in
> + * generic_shutdown_super() but actually wait until sb->kill_sb() has
> + * finished.
> + *
> + * The caller must have acquired a temporary reference on @sb->s_count.
> + *
> + * Return: This returns true if SB_BORN was set, false if SB_DEAD was
> + *         set. The function acquires s_umount and returns with it held.
> + */
> +static bool super_lock_dead(struct super_block *sb)
> +{
> +	if (super_lock(sb, true))
> +		return true;
> +
> +	lockdep_assert_held(&sb->s_umount);
> +	super_unlock_excl(sb);
> +	/* If superblock is dying, wait for everything to be shutdown. */
> +	wait_var_event(&sb->s_flags, wait_dead(sb));
> +	__super_lock_excl(sb);
> +	return false;
> +}
> +
>  /* wait and acquire read-side of @sb->s_umount */
>  static inline bool super_lock_shared(struct super_block *sb)
>  {
> @@ -153,7 +192,7 @@ static inline bool super_lock_excl(struct super_block *sb)
>  }
>  
>  /* wake waiters */
> -#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
> +#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
>  static void super_wake(struct super_block *sb, unsigned int flag)
>  {
>  	unsigned int flags = sb->s_flags;
> @@ -169,6 +208,35 @@ static void super_wake(struct super_block *sb, unsigned int flag)
>  	wake_up_var(&sb->s_flags);
>  }
>  
> +/**
> + * grab_super_dead - acquire an active reference to a superblock
> + * @sb: superblock to acquire
> + *
> + * Acquire a temporary reference on a superblock and try to trade it for
> + * an active reference. This is used in sget{_fc}() to wait for a
> + * superblock to either become SB_BORN or for it to pass through
> + * sb->kill() and be marked as SB_DEAD.
> + *
> + * Return: This returns true if an active reference could be acquired,
> + *         false if not. The function acquires s_umount and returns with
> + *         it held.
> + */
> +static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
> +{
> +	bool born;
> +
> +	s->s_count++;
> +	spin_unlock(&sb_lock);
> +	born = super_lock_dead(s);
> +	if (born && atomic_inc_not_zero(&s->s_active)) {
> +		put_super(s);
> +		return true;
> +	}
> +	up_write(&s->s_umount);
> +	put_super(s);
> +	return false;
> +}
> +

As I'm looking at it now, I'm wondering whether we are not overdoing it a
bit. Why not implement grab_super_dead() as:

static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	if (grab_super(s))
		return true;
	wait_var_event(&s->s_flags, wait_dead(s));
	put_super(s);
	return false;
}
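
(this relies on grab_super() dropping its own temporary reference on
failure, so the s_count++ above is what keeps the superblock alive
across the wait_dead() sleep)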

And just remove super_lock_dead() altogether? I don't expect more users of
that functionality...

								Honza
-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] super: wait for nascent superblocks
  2023-08-18 12:02   ` Jan Kara
@ 2023-08-18 12:46     ` Christian Brauner
  2023-08-18 13:06       ` Jan Kara
  0 siblings, 1 reply; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 12:46 UTC (permalink / raw)
  To: Jan Kara; +Cc: Christoph Hellwig, Al Viro, linux-fsdevel

On Fri, Aug 18, 2023 at 02:02:15PM +0200, Jan Kara wrote:
> On Fri 18-08-23 12:54:17, Christian Brauner wrote:
> > Recent patches experiment with making it possible to allocate a new
> > superblock before opening the relevant block device. Naturally this has
> > intricate side-effects that we get to learn about while developing this.
> > 
> > Superblock allocators such as sget{_fc}() return with s_umount of the
> > new superblock held and lock ordering currently requires that block
> > level locks such as bdev_lock and open_mutex rank above s_umount.
> > 
> > Before aca740cecbe5 ("fs: open block device after superblock creation")
> > ordering was guaranteed to be correct as block devices were opened prior
> > to superblock allocation and thus s_umount wasn't held. But now s_umount
> > must be dropped before opening block devices to avoid locking
> > violations.
> > 
> > This has consequences. The main one being that iterators over
> > @super_blocks and @fs_supers that grab a temporary reference to the
> > superblock can now also grab s_umount before the caller has managed to
> > open block devices and called fill_super(). So whereas before such
> > iterators or concurrent mounts would have simply slept on s_umount until
> > SB_BORN was set or the superblock was discarded due to initialization
> > failure, they can now needlessly spin through sget{_fc}().
> > 
> > If the caller is sleeping on bdev_lock or open_mutex, any caller waiting
> > on SB_BORN will keep spinning somewhere, and potentially this can go on
> > for quite a while.
> > 
> > It should be possible to drop s_umount while allowing iterators to wait
> > on a nascent superblock to either be born or discarded. This patch
> > implements a wait_var_event() mechanism allowing iterators to sleep
> > until they are woken when the superblock is born or discarded.
> > 
> > This also allows us to avoid relooping through @fs_supers and
> > @super_blocks if a superblock isn't yet born or dying.
> > 
> > Link: aca740cecbe5 ("fs: open block device after superblock creation")
> > Signed-off-by: Christian Brauner <brauner@kernel.org>
> 
> Looks mostly good to me. I've spotted only a couple of nits and one
> possible memory ordering issue...
> 
> > @@ -86,6 +81,94 @@ static inline void super_unlock_shared(struct super_block *sb)
> >  	super_unlock(sb, false);
> >  }
> >  
> > +static inline bool wait_born(struct super_block *sb)
> > +{
> > +	unsigned int flags;
> > +
> > +	/*
> > +	 * Pairs with smp_store_release() in super_wake() and ensures
> > +	 * that we see SB_BORN or SB_DYING after we're woken.
> > +	 */
> > +	flags = smp_load_acquire(&sb->s_flags);
> > +	return flags & (SB_BORN | SB_DYING);
> > +}
> > +
> > +/**
> > + * super_lock - wait for superblock to become ready
> 
> Perhaps expand this a bit to "wait for superblock to become ready and
> lock it"

Ok.

> 
> > + * @sb: superblock to wait for
> > + * @excl: whether exclusive access is required
> > + *
> > + * If the superblock has neither passed through vfs_get_tree() nor
> > + * generic_shutdown_super() yet wait for it to happen. Either superblock
> > + * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
> > + * woken and we'll see SB_DYING.
> > + *
> > + * The caller must have acquired a temporary reference on @sb->s_count.
> > + *
> > + * Return: This returns true if SB_BORN was set, false if SB_DYING was
> > + *         set. The function acquires s_umount and returns with it held.
> > + */
> > +static bool super_lock(struct super_block *sb, bool excl)
> 
> Perhaps we can make the function __must_check? Because if you don't care
> about the result you should be using __super_lock().

Ok.

> 
> > +{
> > +
> > +	lockdep_assert_not_held(&sb->s_umount);
> > +
> > +relock:
> > +	__super_lock(sb, excl);
> > +
> > +	/*
> > +	 * Has gone through generic_shutdown_super() in the meantime.
> > +	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
> > +	 * grab a reference to this. Tell them so.
> > +	 */
> > +	if (sb->s_flags & SB_DYING)
> > +		return false;
> > +
> > +	/* Has called ->get_tree() successfully. */
> > +	if (sb->s_flags & SB_BORN)
> > +		return true;
> > +
> > +	super_unlock(sb, excl);
> > +
> > +	/* wait until the superblock is ready or dying */
> > +	wait_var_event(&sb->s_flags, wait_born(sb));
> > +
> > +	/*
> > +	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
> > +	 * Just reacquire @sb->s_umount for the caller.
> > +	 */
> > +	goto relock;
> > +}
> > +
> > +/* wait and acquire read-side of @sb->s_umount */
> > +static inline bool super_lock_shared(struct super_block *sb)
> > +{
> > +	return super_lock(sb, false);
> > +}
> > +
> > +/* wait and acquire write-side of @sb->s_umount */
> > +static inline bool super_lock_excl(struct super_block *sb)
> > +{
> > +	return super_lock(sb, true);
> > +}
> > +
> > +/* wake waiters */
> > +#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
> > +static void super_wake(struct super_block *sb, unsigned int flag)
> > +{
> > +	unsigned int flags = sb->s_flags;
> > +
> > +	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
> > +	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
> 
> Maybe assert here that s_umount is held?

I think that should be asserted in the callers, because we don't hold it
when we wake SB_DEAD in deactivate_locked_super(), where we neither have
nor need it.
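
I.e. the callers that do hold it could then do (sketch):

	lockdep_assert_held(&sb->s_umount);
	super_wake(sb, SB_BORN);

while the SB_DEAD wakeup stays assertion-free.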

> 
> > +
> > +	/*
> > +	 * Pairs with smp_load_acquire() in super_lock() and
> > +	 * ensures that @flag is set before we wake anyone.
> > +	 */
> > +	smp_store_release(&sb->s_flags, flags | flag);
> > +	wake_up_var(&sb->s_flags);
> 
> As I'm thinking about it now, we may need at least a smp_rmb() between the
> store and wake_up_var(). What I'm worried about is the following:
> 
> TASK1					TASK2
> super_wake()				super_lock()
> 					  check s_flags, SB_BORN not set yet
>   waitqueue_active() from wake_up_var()
>     which got reordered by the CPU before
>     smp_store_release(). This seems possible
>     because a release is one-way permeable in
>     this direction.
> 					  wait_var_event(..)
> 					    prepare_to_wait_event()
> 					    wait_born()
> 					      SB_BORN still not set => sleep
>   smp_store_release() sets SB_BORN
>   wake_up_var() does nothing because it thinks
>     the waitqueue is empty.

Then I propose we use smp_mb() here, similar to what we do for __I_NEW.
Does that sound ok?
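
Concretely, something like this sketch (plain store plus smp_mb(),
which also orders the flag store against the waitqueue check hidden
in wake_up_var()):

	static void super_wake(struct super_block *sb, unsigned int flag)
	{
		unsigned int flags = sb->s_flags;

		WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
		WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

		/*
		 * Pairs with smp_load_acquire() in super_lock() and also
		 * orders the store before wake_up_var()'s waitqueue check.
		 */
		sb->s_flags = flags | flag;
		smp_mb();
		wake_up_var(&sb->s_flags);
	}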

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 4/4] super: wait until we passed kill super
  2023-08-18 12:26   ` Jan Kara
@ 2023-08-18 12:48     ` Christian Brauner
  0 siblings, 0 replies; 13+ messages in thread
From: Christian Brauner @ 2023-08-18 12:48 UTC (permalink / raw)
  To: Jan Kara; +Cc: Christoph Hellwig, Al Viro, linux-fsdevel

On Fri, Aug 18, 2023 at 02:26:09PM +0200, Jan Kara wrote:
> On Fri 18-08-23 12:54:18, Christian Brauner wrote:
> > Recent rework moved block device closing out of sb->put_super() and into
> > sb->kill_sb() to avoid deadlocks as s_umount is held in put_super() and
> > blkdev_put() can end up taking s_umount again.
> > 
> > That means we need to move the removal of the superblock from @fs_supers
> > out of generic_shutdown_super() and into deactivate_locked_super() to
> > ensure that concurrent mounters don't fail to open block devices that
> > are still in use because blkdev_put() in sb->kill_sb() hasn't been
> > called yet.
> > 
> > We can now do this as we can make iterators through @fs_supers and
> > @super_blocks wait without holding s_umount. Concurrent mounts will wait
> > until a dying superblock is fully dead, i.e. until sb->kill_sb() has been
> > called and SB_DEAD has been set. Concurrent iterators can already discard
> > any SB_DYING superblock.
> > 
> > Signed-off-by: Christian Brauner <brauner@kernel.org>
> 
> One nit below:
> 
> > +static inline bool wait_dead(struct super_block *sb)
> > +{
> > +	unsigned int flags;
> > +
> > +	/*
> > +	 * Pairs with smp_store_release() in super_wake() and ensures
> > +	 * that we see SB_DEAD after we're woken.
> > +	 */
> > +	flags = smp_load_acquire(&sb->s_flags);
> > +	return flags & SB_DEAD;
> > +}
> > +
> >  /**
> >   * super_lock - wait for superblock to become ready
> >   * @sb: superblock to wait for
> > @@ -140,6 +152,33 @@ static bool super_lock(struct super_block *sb, bool excl)
> >  	goto relock;
> >  }
> >  
> > +/**
> > + * super_lock_dead - wait for superblock to become ready or fully dead
> > + * @sb: superblock to wait for
> > + *
> > + * Wait for a superblock to be SB_BORN or to be SB_DEAD. In other words,
> > + * don't just wait for the superblock to be shutdown in
> > + * generic_shutdown_super() but actually wait until sb->kill_sb() has
> > + * finished.
> > + *
> > + * The caller must have acquired a temporary reference on @sb->s_count.
> > + *
> > + * Return: This returns true if SB_BORN was set, false if SB_DEAD was
> > + *         set. The function acquires s_umount and returns with it held.
> > + */
> > +static bool super_lock_dead(struct super_block *sb)
> > +{
> > +	if (super_lock(sb, true))
> > +		return true;
> > +
> > +	lockdep_assert_held(&sb->s_umount);
> > +	super_unlock_excl(sb);
> > +	/* If superblock is dying, wait for everything to be shutdown. */
> > +	wait_var_event(&sb->s_flags, wait_dead(sb));
> > +	__super_lock_excl(sb);
> > +	return false;
> > +}
> > +
> >  /* wait and acquire read-side of @sb->s_umount */
> >  static inline bool super_lock_shared(struct super_block *sb)
> >  {
> > @@ -153,7 +192,7 @@ static inline bool super_lock_excl(struct super_block *sb)
> >  }
> >  
> >  /* wake waiters */
> > -#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
> > +#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
> >  static void super_wake(struct super_block *sb, unsigned int flag)
> >  {
> >  	unsigned int flags = sb->s_flags;
> > @@ -169,6 +208,35 @@ static void super_wake(struct super_block *sb, unsigned int flag)
> >  	wake_up_var(&sb->s_flags);
> >  }
> >  
> > +/**
> > + * grab_super_dead - acquire an active reference to a superblock
> > + * @sb: superblock to acquire
> > + *
> > + * Acquire a temporary reference on a superblock and try to trade it for
> > + * an active reference. This is used in sget{_fc}() to wait for a
> > + * superblock to either become SB_BORN or for it to pass through
> > + * sb->kill() and be marked as SB_DEAD.
> > + *
> > + * Return: This returns true if an active reference could be acquired,
> > + *         false if not. The function acquires s_umount and returns with
> > + *         it held.
> > + */
> > +static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
> > +{
> > +	bool born;
> > +
> > +	s->s_count++;
> > +	spin_unlock(&sb_lock);
> > +	born = super_lock_dead(s);
> > +	if (born && atomic_inc_not_zero(&s->s_active)) {
> > +		put_super(s);
> > +		return true;
> > +	}
> > +	up_write(&s->s_umount);
> > +	put_super(s);
> > +	return false;
> > +}
> > +
> 
> As I'm looking at it now, I'm wondering whether we are not overdoing it a
> bit. Why not implement grab_super_dead() as:
> 
> static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
> {
> 	s->s_count++;
> 	if (grab_super(s))
> 		return true;
> 	wait_var_event(&s->s_flags, wait_dead(s));
> 	put_super(s);
> 	return false;
> }

Sounds good. Thanks for the suggestion.

> 
> And just remove super_lock_dead() altogether? I don't expect more users of
> that functionality...

Famous last words... :)

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] super: wait for nascent superblocks
  2023-08-18 12:46     ` Christian Brauner
@ 2023-08-18 13:06       ` Jan Kara
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Kara @ 2023-08-18 13:06 UTC (permalink / raw)
  To: Christian Brauner; +Cc: Jan Kara, Christoph Hellwig, Al Viro, linux-fsdevel

On Fri 18-08-23 14:46:57, Christian Brauner wrote:
> On Fri, Aug 18, 2023 at 02:02:15PM +0200, Jan Kara wrote:
> > On Fri 18-08-23 12:54:17, Christian Brauner wrote:
> > > Recent patches experiment with making it possible to allocate a new
> > > superblock before opening the relevant block device. Naturally this has
> > > intricate side-effects that we get to learn about while developing this.
> > > 
> > > Superblock allocators such as sget{_fc}() return with s_umount of the
> > > new superblock held and lock ordering currently requires that block
> > > level locks such as bdev_lock and open_mutex rank above s_umount.
> > > 
> > > Before aca740cecbe5 ("fs: open block device after superblock creation")
> > > ordering was guaranteed to be correct as block devices were opened prior
> > > to superblock allocation and thus s_umount wasn't held. But now s_umount
> > > must be dropped before opening block devices to avoid locking
> > > violations.
> > > 
> > > This has consequences. The main one being that iterators over
> > > @super_blocks and @fs_supers that grab a temporary reference to the
> > > superblock can now also grab s_umount before the caller has managed to
> > > open block devices and called fill_super(). So whereas before such
> > > iterators or concurrent mounts would have simply slept on s_umount until
> > > SB_BORN was set or the superblock was discarded due to initialization
> > > failure, they can now needlessly spin through sget{_fc}().
> > > 
> > > If the caller is sleeping on bdev_lock or open_mutex, any caller waiting
> > > on SB_BORN will keep spinning somewhere, and potentially this can go on
> > > for quite a while.
> > > 
> > > It should be possible to drop s_umount while allowing iterators to wait
> > > on a nascent superblock to either be born or discarded. This patch
> > > implements a wait_var_event() mechanism allowing iterators to sleep
> > > until they are woken when the superblock is born or discarded.
> > > 
> > > This also allows us to avoid relooping through @fs_supers and
> > > @super_blocks if a superblock isn't yet born or dying.
> > > 
> > > Link: aca740cecbe5 ("fs: open block device after superblock creation")
> > > Signed-off-by: Christian Brauner <brauner@kernel.org>
> > 
> > Looks mostly good to me. I've spotted only a couple of nits and one
> > possible memory ordering issue...
...
> > > +/* wait and acquire read-side of @sb->s_umount */
> > > +static inline bool super_lock_shared(struct super_block *sb)
> > > +{
> > > +	return super_lock(sb, false);
> > > +}
> > > +
> > > +/* wait and acquire write-side of @sb->s_umount */
> > > +static inline bool super_lock_excl(struct super_block *sb)
> > > +{
> > > +	return super_lock(sb, true);
> > > +}
> > > +
> > > +/* wake waiters */
> > > +#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
> > > +static void super_wake(struct super_block *sb, unsigned int flag)
> > > +{
> > > +	unsigned int flags = sb->s_flags;
> > > +
> > > +	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
> > > +	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
> > 
> > Maybe assert here that s_umount is held?
> 
> I think that should be asserted in the callers, because we don't hold it
> when we wake SB_DEAD in deactivate_locked_super(), where we neither have
> nor need it.

Right, I've realized that after I've sent this.

> > > +
> > > +	/*
> > > +	 * Pairs with smp_load_acquire() in super_lock() and
> > > +	 * ensures that @flag is set before we wake anyone.
> > > +	 */
> > > +	smp_store_release(&sb->s_flags, flags | flag);
> > > +	wake_up_var(&sb->s_flags);
> > 
> > As I'm thinking about it now, we may need at least a smp_rmb() between the
> > store and wake_up_var(). What I'm worried about is the following:
> > 
> > TASK1					TASK2
> > super_wake()				super_lock()
> > 					  check s_flags, SB_BORN not set yet
> >   waitqueue_active() from wake_up_var()
> >     which got reordered by the CPU before
> >     smp_store_release(). This seems possible
> >     because a release is one-way permeable in
> >     this direction.
> > 					  wait_var_event(..)
> > 					    prepare_to_wait_event()
> > 					    wait_born()
> > 					      SB_BORN still not set => sleep
> >   smp_store_release() sets SB_BORN
> >   wake_up_var() does nothing because it thinks
> >     the waitqueue is empty.
> 
> Then I propose we use smp_mb() here, similar to what we do for __I_NEW.
> Does that sound ok?

Sure, fine by me.

								Honza

-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 4/4] super: wait until we passed kill super
  2023-08-18 10:54 ` [PATCH v2 4/4] super: wait until we passed kill super Christian Brauner
  2023-08-18 12:26   ` Jan Kara
@ 2023-08-25  1:48   ` kernel test robot
  1 sibling, 0 replies; 13+ messages in thread
From: kernel test robot @ 2023-08-25  1:48 UTC (permalink / raw)
  To: Christian Brauner; +Cc: oe-kbuild-all

Hi Christian,

kernel test robot noticed the following build warnings:

[auto build test WARNING on f3aeab61fb15edef1e81828da8dbf0814541e49b]

url:    https://github.com/intel-lab-lkp/linux/commits/Christian-Brauner/super-use-locking-helpers/20230821-103401
base:   f3aeab61fb15edef1e81828da8dbf0814541e49b
patch link:    https://lore.kernel.org/r/20230818-vfs-super-fixes-v3-v2-4-cdab45934983%40kernel.org
patch subject: [PATCH v2 4/4] super: wait until we passed kill super
config: x86_64-defconfig (https://download.01.org/0day-ci/archive/20230825/202308250920.J7SzhuBk-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230825/202308250920.J7SzhuBk-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308250920.J7SzhuBk-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> fs/super.c:225: warning: Function parameter or member 's' not described in 'grab_super_dead'
>> fs/super.c:225: warning: Excess function parameter 'sb' description in 'grab_super_dead'


vim +225 fs/super.c

   210	
   211	/**
   212	 * grab_super_dead - acquire an active reference to a superblock
   213	 * @sb: superblock to acquire
   214	 *
   215	 * Acquire a temporary reference on a superblock and try to trade it for
   216	 * an active reference. This is used in sget{_fc}() to wait for a
   217	 * superblock to either become SB_BORN or for it to pass through
   218	 * sb->kill() and be marked as SB_DEAD.
   219	 *
   220	 * Return: This returns true if an active reference could be acquired,
   221	 *         false if not. The function acquires s_umount and returns with
   222	 *         it held.
   223	 */
   224	static bool grab_super_dead(struct super_block *s) __releases(sb_lock)
 > 225	{
   226		bool born;
   227	
   228		s->s_count++;
   229		spin_unlock(&sb_lock);
   230		born = super_lock_dead(s);
   231		if (born && atomic_inc_not_zero(&s->s_active)) {
   232			put_super(s);
   233			return true;
   234		}
   235		up_write(&s->s_umount);
   236		put_super(s);
   237		return false;
   238	}
   239	
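
Both warnings are just the kernel-doc above using @sb while the
parameter is named s; a minimal fix (sketch) would be renaming the
parameter to match:

	static bool grab_super_dead(struct super_block *sb) __releases(sb_lock)

with the uses of s in the body adjusted accordingly.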

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2023-08-25  1:48 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-18 10:54 [PATCH v2 0/4] super: allow waiting without s_umount held Christian Brauner
2023-08-18 10:54 ` [PATCH v2 1/4] super: use locking helpers Christian Brauner
2023-08-18 11:08   ` Jan Kara
2023-08-18 10:54 ` [PATCH v2 2/4] super: make locking naming consistent Christian Brauner
2023-08-18 11:08   ` Jan Kara
2023-08-18 10:54 ` [PATCH v2 3/4] super: wait for nascent superblocks Christian Brauner
2023-08-18 12:02   ` Jan Kara
2023-08-18 12:46     ` Christian Brauner
2023-08-18 13:06       ` Jan Kara
2023-08-18 10:54 ` [PATCH v2 4/4] super: wait until we passed kill super Christian Brauner
2023-08-18 12:26   ` Jan Kara
2023-08-18 12:48     ` Christian Brauner
2023-08-25  1:48   ` kernel test robot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.