[v3,1/2] tmpfs: Add per-superblock i_ino support

Message ID 19ff8eddfe9cbafc87e55949189704f31d123172.1578072481.git.chris@chrisdown.name
State Superseded
Series
  • fs: inode: shmem: Reduce risk of inum overflow

Commit Message

Chris Down Jan. 3, 2020, 5:30 p.m. UTC
get_next_ino has a number of problems:

- It uses and returns a uint, which is susceptible to overflow if a lot
  of volatile inodes that use get_next_ino are created.
- It's global, with no specificity per-sb or even per-filesystem. This
  means it's not that difficult to cause inode number wraparounds on a
  single device, which can result in having multiple distinct inodes
  with the same inode number.

This patch adds a per-superblock counter that mitigates the second case.
This design also allows us to later have a specific i_ino size
per-device, for example, allowing users to choose whether to use 32- or
64-bit inodes for each tmpfs mount. This is implemented in the next
commit.
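
In outline, the allocation path after this patch behaves as below (a
simplified sketch of the mm/shmem.c hunk, not a literal excerpt; field and
lock names follow the patch):

	if (sb->s_flags & SB_KERNMOUNT) {
		/* Kernel-internal mounts keep using the global allocator. */
		inode->i_ino = get_next_ino();
	} else {
		/* Per-sb counter, protected by the existing stat_lock. */
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(sbinfo->next_ino > UINT_MAX))
			/* Emulate get_next_ino's uint wraparound. */
			sbinfo->next_ino = 1;
		inode->i_ino = sbinfo->next_ino++;
		spin_unlock(&sbinfo->stat_lock);
	}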

Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Amir Goldstein <amir73il@gmail.com>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-team@fb.com
---
 include/linux/shmem_fs.h |  1 +
 mm/shmem.c               | 33 ++++++++++++++++++++++++++++++++-
 2 files changed, 33 insertions(+), 1 deletion(-)

Comments

Amir Goldstein Jan. 4, 2020, 7:09 p.m. UTC | #1
On Fri, Jan 3, 2020 at 7:30 PM Chris Down <chris@chrisdown.name> wrote:
>
> get_next_ino has a number of problems:
>
> - It uses and returns a uint, which is susceptible to overflow if a lot
>   of volatile inodes that use get_next_ino are created.
> - It's global, with no specificity per-sb or even per-filesystem. This
>   means it's not that difficult to cause inode number wraparounds on a
>   single device, which can result in having multiple distinct inodes
>   with the same inode number.
>
> This patch adds a per-superblock counter that mitigates the second case.
> This design also allows us to later have a specific i_ino size
> per-device, for example, allowing users to choose whether to use 32- or
> 64-bit inodes for each tmpfs mount. This is implemented in the next
> commit.
>
> Signed-off-by: Chris Down <chris@chrisdown.name>
> Cc: Al Viro <viro@zeniv.linux.org.uk>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Amir Goldstein <amir73il@gmail.com>
> Cc: Jeff Layton <jlayton@kernel.org>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Tejun Heo <tj@kernel.org>
> Cc: linux-fsdevel@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Cc: kernel-team@fb.com
> ---

Some nits. When fixed you may add:
Reviewed-by: Amir Goldstein <amir73il@gmail.com>


>  include/linux/shmem_fs.h |  1 +
>  mm/shmem.c               | 33 ++++++++++++++++++++++++++++++++-
>  2 files changed, 33 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index de8e4b71e3ba..7fac91f490dc 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -35,6 +35,7 @@ struct shmem_sb_info {
>         unsigned char huge;         /* Whether to try for hugepages */
>         kuid_t uid;                 /* Mount uid for root directory */
>         kgid_t gid;                 /* Mount gid for root directory */
> +       ino_t next_ino;             /* The next per-sb inode number to use */
>         struct mempolicy *mpol;     /* default memory policy for mappings */
>         spinlock_t shrinklist_lock;   /* Protects shrinklist */
>         struct list_head shrinklist;  /* List of shinkable inodes */
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 8793e8cc1a48..638b1e30625f 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -2236,6 +2236,15 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
>         return 0;
>  }
>
> +/*
> + * shmem_get_inode - reserve, allocate, and initialise a new inode
> + *
> + * If SB_KERNMOUNT, we use the per-sb inode allocator to avoid wraparound.
> + * Otherwise, we use get_next_ino, which is global.

It's the other way around.

> + *
> + * If max_inodes is greater than 0 (ie. non-SB_KERNMOUNT), we may have to grab
> + * the per-sb stat_lock.

It's not a "may", it's for sure, but I don't see what this comment adds
in this context.
The comment about stat_lock below seems enough to me.

> + */
>  static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
>                                      umode_t mode, dev_t dev, unsigned long flags)
>  {
> @@ -2248,7 +2257,28 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
>
>         inode = new_inode(sb);
>         if (inode) {
> -               inode->i_ino = get_next_ino();
> +               if (sb->s_flags & SB_KERNMOUNT) {
> +                       /*
> +                        * __shmem_file_setup, one of our callers, is lock-free:
> +                        * it doesn't hold stat_lock in shmem_reserve_inode
> +                        * since max_inodes is always 0, and is called from
> +                        * potentially unknown contexts. As such, use the global
> +                        * allocator which doesn't require the per-sb stat_lock.
> +                        */
> +                       inode->i_ino = get_next_ino();
> +               } else {
> +                       spin_lock(&sbinfo->stat_lock);
> +                       if (unlikely(sbinfo->next_ino > UINT_MAX)) {
> +                               /*
> +                                * Emulate get_next_ino uint wraparound for
> +                                * compatibility
> +                                */
> +                               sbinfo->next_ino = 1;
> +                       }
> +                       inode->i_ino = sbinfo->next_ino++;
> +                       spin_unlock(&sbinfo->stat_lock);
> +               }
> +
>                 inode_init_owner(inode, dir, mode);
>                 inode->i_blocks = 0;
>                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
> @@ -3662,6 +3692,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
>  #else
>         sb->s_flags |= SB_NOUSER;
>  #endif
> +       sbinfo->next_ino = 1;
>         sbinfo->max_blocks = ctx->blocks;
>         sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
>         sbinfo->uid = ctx->uid;
> --
> 2.24.1
>
Chris Down Jan. 5, 2020, 11:28 a.m. UTC | #2
Amir Goldstein writes:
>Some nits. When fixed you may add:
>Reviewed-by: Amir Goldstein <amir73il@gmail.com>

Thanks! I'll fix these and the comments on the other patches, and send v4 here
and with the linux-mm/tmpfs maintainer on cc. :-)
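
For reference, the reworded header comment might end up along these lines
(just a sketch here, not the final v4 wording):

	/*
	 * shmem_get_inode - reserve, allocate, and initialise a new inode
	 *
	 * If SB_KERNMOUNT, we use the global get_next_ino allocator, since
	 * our callers there may be lock-free. Otherwise, we use the per-sb
	 * allocator to reduce the risk of inode number wraparound.
	 */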

Patch

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index de8e4b71e3ba..7fac91f490dc 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -35,6 +35,7 @@  struct shmem_sb_info {
 	unsigned char huge;	    /* Whether to try for hugepages */
 	kuid_t uid;		    /* Mount uid for root directory */
 	kgid_t gid;		    /* Mount gid for root directory */
+	ino_t next_ino;		    /* The next per-sb inode number to use */
 	struct mempolicy *mpol;     /* default memory policy for mappings */
 	spinlock_t shrinklist_lock;   /* Protects shrinklist */
 	struct list_head shrinklist;  /* List of shinkable inodes */
diff --git a/mm/shmem.c b/mm/shmem.c
index 8793e8cc1a48..638b1e30625f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2236,6 +2236,15 @@  static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * shmem_get_inode - reserve, allocate, and initialise a new inode
+ *
+ * If SB_KERNMOUNT, we use the per-sb inode allocator to avoid wraparound.
+ * Otherwise, we use get_next_ino, which is global.
+ *
+ * If max_inodes is greater than 0 (ie. non-SB_KERNMOUNT), we may have to grab
+ * the per-sb stat_lock.
+ */
 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
 				     umode_t mode, dev_t dev, unsigned long flags)
 {
@@ -2248,7 +2257,28 @@  static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 	inode = new_inode(sb);
 	if (inode) {
-		inode->i_ino = get_next_ino();
+		if (sb->s_flags & SB_KERNMOUNT) {
+			/*
+			 * __shmem_file_setup, one of our callers, is lock-free:
+			 * it doesn't hold stat_lock in shmem_reserve_inode
+			 * since max_inodes is always 0, and is called from
+			 * potentially unknown contexts. As such, use the global
+			 * allocator which doesn't require the per-sb stat_lock.
+			 */
+			inode->i_ino = get_next_ino();
+		} else {
+			spin_lock(&sbinfo->stat_lock);
+			if (unlikely(sbinfo->next_ino > UINT_MAX)) {
+				/*
+				 * Emulate get_next_ino uint wraparound for
+				 * compatibility
+				 */
+				sbinfo->next_ino = 1;
+			}
+			inode->i_ino = sbinfo->next_ino++;
+			spin_unlock(&sbinfo->stat_lock);
+		}
+
 		inode_init_owner(inode, dir, mode);
 		inode->i_blocks = 0;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
@@ -3662,6 +3692,7 @@  static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
 #else
 	sb->s_flags |= SB_NOUSER;
 #endif
+	sbinfo->next_ino = 1;
 	sbinfo->max_blocks = ctx->blocks;
 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
 	sbinfo->uid = ctx->uid;
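
As a rough illustration of the user-visible effect (a hypothetical user-space
example, not part of the patch; the mount point and file names are
assumptions), inode numbers on a tmpfs mount can be read with stat(2):

	/* Print the inode numbers of a few paths on a tmpfs mount. */
	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		/* Assumes /mnt/tmp is a freshly mounted tmpfs with files a and b. */
		const char *paths[] = { "/mnt/tmp", "/mnt/tmp/a", "/mnt/tmp/b" };
		struct stat st;

		for (int i = 0; i < 3; i++) {
			if (stat(paths[i], &st) == 0)
				printf("%s: ino=%llu\n", paths[i],
				       (unsigned long long)st.st_ino);
		}
		return 0;
	}

With the per-sb counter, each new tmpfs mount starts numbering from
next_ino = 1 rather than drawing from the shared global counter.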