linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Carlos Maiolino <cem@kernel.org>
To: Jan Kara <jack@suse.cz>
Cc: hughd@google.com, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org, djwong@kernel.org
Subject: Re: [PATCH 5/6] shmem: quota support
Date: Tue, 11 Apr 2023 11:37:26 +0200	[thread overview]
Message-ID: <20230411093726.ry3e6espmocvwq6f@andromeda> (raw)
In-Reply-To: <20230405114245.nnzorjm5nlr4l4g6@quack3>

On Wed, Apr 05, 2023 at 01:42:45PM +0200, Jan Kara wrote:
> On Mon 03-04-23 10:47:58, cem@kernel.org wrote:
> > From: Lukas Czerner <lczerner@redhat.com>
> >
> > Now the basic infra-structure is in place, enable quota support for tmpfs.
> >
> > Signed-off-by: Lukas Czerner <lczerner@redhat.com>
> > Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
> 
> Some comments below...
> 
> > diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> > index cf38381bdb4c1..3e7e18726feb5 100644
> > --- a/include/linux/shmem_fs.h
> > +++ b/include/linux/shmem_fs.h
> > @@ -26,6 +26,9 @@ struct shmem_inode_info {
> >  	atomic_t		stop_eviction;	/* hold when working on inode */
> >  	struct timespec64	i_crtime;	/* file creation time */
> >  	unsigned int		fsflags;	/* flags for FS_IOC_[SG]ETFLAGS */
> > +#ifdef CONFIG_TMPFS_QUOTA
> > +	struct dquot		*i_dquot[MAXQUOTAS];
> > +#endif
> >  	struct inode		vfs_inode;
> >  };
> >
> > @@ -171,4 +174,10 @@ extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
> >  #define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
> >  #define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL
> >
> > +#ifdef CONFIG_TMPFS_QUOTA
> > +#define SHMEM_MAXQUOTAS 2
> 
> You have this definition already in mm/shmem_quota.c.
> 

True, that define is not visible from here though, I'll simply remove the one
from mm/shmem_quota.c and keep it here in shmem_fs.h.


> > +extern const struct dquot_operations shmem_quota_operations;
> > +extern struct quota_format_type shmem_quota_format;
> > +#endif /* CONFIG_TMPFS_QUOTA */
> > +
> >  #endif
> > diff --git a/mm/shmem.c b/mm/shmem.c
> > index 88e13930fc013..d7529c883eaf5 100644
> > --- a/mm/shmem.c
> > +++ b/mm/shmem.c
> > @@ -79,6 +79,7 @@ static struct vfsmount *shm_mnt;
> >  #include <linux/userfaultfd_k.h>
> >  #include <linux/rmap.h>
> >  #include <linux/uuid.h>
> > +#include <linux/quotaops.h>
> >
> >  #include <linux/uaccess.h>
> >
> > @@ -116,10 +117,12 @@ struct shmem_options {
> >  	bool full_inums;
> >  	int huge;
> >  	int seen;
> > +	unsigned short quota_types;
> >  #define SHMEM_SEEN_BLOCKS 1
> >  #define SHMEM_SEEN_INODES 2
> >  #define SHMEM_SEEN_HUGE 4
> >  #define SHMEM_SEEN_INUMS 8
> > +#define SHMEM_SEEN_QUOTA 16
> >  };
> >
> >  #ifdef CONFIG_TMPFS
> > @@ -211,8 +214,11 @@ static inline int shmem_inode_acct_block(struct inode *inode, long pages)
> >  		if (percpu_counter_compare(&sbinfo->used_blocks,
> >  					   sbinfo->max_blocks - pages) > 0)
> >  			goto unacct;
> > +		if ((err = dquot_alloc_block_nodirty(inode, pages)) != 0)
> > +			goto unacct;
> 
> We generally try to avoid assignments in conditions so I'd do:
> 
> 		err = dquot_alloc_block_nodirty(inode, pages);
> 		if (err)
> 			goto unacct;

Fair enough. Will update it for the new version.
> 
> >  		percpu_counter_add(&sbinfo->used_blocks, pages);
> > -	}
> > +	} else if ((err = dquot_alloc_block_nodirty(inode, pages)) != 0)
> > +		goto unacct;
> >
> 
> The same here...
> 
> > @@ -1133,6 +1179,15 @@ static int shmem_setattr(struct mnt_idmap *idmap,
> >  		}
> >  	}
> >
> > +	/* Transfer quota accounting */
> > +	if (i_uid_needs_update(idmap, attr, inode) ||
> > +	    i_gid_needs_update(idmap, attr,inode)) {
> > +		error = dquot_transfer(idmap, inode, attr);
> > +
> > +		if (error)
> > +			return error;
> > +	}
> > +
> 
> I think you also need to add:
> 
>         if (is_quota_modification(idmap, inode, attr)) {
>                 error = dquot_initialize(inode);
>                 if (error)
>                         return error;
>         }
> 
> to shmem_setattr().

Ok.

> 
> >  	setattr_copy(idmap, inode, attr);
> >  	if (attr->ia_valid & ATTR_MODE)
> >  		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
> > @@ -1178,7 +1233,9 @@ static void shmem_evict_inode(struct inode *inode)
> >  	simple_xattrs_free(&info->xattrs);
> >  	WARN_ON(inode->i_blocks);
> >  	shmem_free_inode(inode->i_sb);
> > +	dquot_free_inode(inode);
> >  	clear_inode(inode);
> > +	dquot_drop(inode);
> >  }
> >
> >  static int shmem_find_swap_entries(struct address_space *mapping,
> > @@ -1975,7 +2032,6 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> >
> >  	spin_lock_irq(&info->lock);
> >  	info->alloced += folio_nr_pages(folio);
> > -	inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
> >  	shmem_recalc_inode(inode);
> >  	spin_unlock_irq(&info->lock);
> >  	alloced = true;
> > @@ -2346,9 +2402,10 @@ static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
> >  #define shmem_initxattrs NULL
> >  #endif
> >
> > -static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb,
> > -				     struct inode *dir, umode_t mode, dev_t dev,
> > -				     unsigned long flags)
> > +static struct inode *shmem_get_inode_noquota(struct mnt_idmap *idmap,
> > +					     struct super_block *sb,
> > +					     struct inode *dir, umode_t mode,
> > +					     dev_t dev, unsigned long flags)
> >  {
> >  	struct inode *inode;
> >  	struct shmem_inode_info *info;
> > @@ -2422,6 +2479,37 @@ static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block
> >  	return inode;
> >  }
> >
> > +static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
> > +				     struct super_block *sb, struct inode *dir,
> > +				     umode_t mode, dev_t dev, unsigned long flags)
> > +{
> > +	int err;
> > +	struct inode *inode;
> > +
> > +	inode = shmem_get_inode_noquota(idmap, sb, dir, mode, dev, flags);
> > +	if (IS_ERR(inode))
> > +		return inode;
> > +
> > +	err = dquot_initialize(inode);
> > +	if (err)
> > +		goto errout;
> > +
> > +	err = dquot_alloc_inode(inode);
> > +	if (err) {
> > +		dquot_drop(inode);
> > +		goto errout;
> > +	}
> > +	return inode;
> > +
> > +errout:
> > +	inode->i_flags |= S_NOQUOTA;
> > +	iput(inode);
> > +	shmem_free_inode(sb);
> 
> I think shmem_free_inode() is superfluous here. iput() above should already
> unaccount the inode...

Right, I see it can be called from .evict_inode during iput_final(). Thanks for
spotting it.

> 
> > +	if (err)
> 
> How could err be possibly unset here?

I don't think it can, I'll update it.

> 
> > +		return ERR_PTR(err);
> > +	return NULL;
> > +}
> > +
> 
> > @@ -3582,6 +3679,18 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
> >  		ctx->full_inums = true;
> >  		ctx->seen |= SHMEM_SEEN_INUMS;
> >  		break;
> > +	case Opt_quota:
> > +		ctx->seen |= SHMEM_SEEN_QUOTA;
> > +		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
> > +		break;
> > +	case Opt_usrquota:
> > +		ctx->seen |= SHMEM_SEEN_QUOTA;
> > +		ctx->quota_types |= QTYPE_MASK_USR;
> > +		break;
> > +	case Opt_grpquota:
> > +		ctx->seen |= SHMEM_SEEN_QUOTA;
> > +		ctx->quota_types |= QTYPE_MASK_GRP;
> > +		break;
> >  	}
> >  	return 0;
> >
> > @@ -3681,6 +3790,12 @@ static int shmem_reconfigure(struct fs_context *fc)
> >  		goto out;
> >  	}
> >
> > +	if (ctx->seen & SHMEM_SEEN_QUOTA &&
> > +	    !sb_any_quota_loaded(fc->root->d_sb)) {
> > +		err = "Cannot enable quota on remount";
> > +		goto out;
> > +	}
> > +
> >  	if (ctx->seen & SHMEM_SEEN_HUGE)
> >  		sbinfo->huge = ctx->huge;
> >  	if (ctx->seen & SHMEM_SEEN_INUMS)
> > @@ -3763,6 +3878,9 @@ static void shmem_put_super(struct super_block *sb)
> >  {
> >  	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
> >
> > +#ifdef CONFIG_TMPFS_QUOTA
> > +	shmem_disable_quotas(sb);
> > +#endif
> >  	free_percpu(sbinfo->ino_batch);
> >  	percpu_counter_destroy(&sbinfo->used_blocks);
> >  	mpol_put(sbinfo->mpol);
> > @@ -3841,6 +3959,17 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
> >  #endif
> >  	uuid_gen(&sb->s_uuid);
> >
> > +#ifdef CONFIG_TMPFS_QUOTA
> > +	if (ctx->seen & SHMEM_SEEN_QUOTA) {
> > +		sb->dq_op = &shmem_quota_operations;
> > +		sb->s_qcop = &dquot_quotactl_sysfile_ops;
> > +		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
> 
> s_quota_types should rather be copied from ctx, shouldn't it? Or why is
> s_quota_types inconsistent with ctx->quota_types?

I believe s_quota_types here is a bitmask of supported quota types, while
ctx->quota_types refers to the mount options being passed from the user.

So we should enable in sb->s_quota_types which quota types the filesystem
supports, not which were enabled by the user.

Cheers.

-- 
Carlos Maiolino


  reply	other threads:[~2023-04-11  9:37 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-04-03  8:47 [PATCH 0/6] shmem: Add user and group quota support for tmpfs cem
2023-04-03  8:47 ` [PATCH 1/6] shmem: make shmem_inode_acct_block() return error cem
2023-04-04 10:59   ` Jan Kara
2023-04-03  8:47 ` [PATCH 2/6] shmem: make shmem_get_inode() return ERR_PTR instead of NULL cem
2023-04-03 10:23   ` Jan Kara
2023-04-11  7:47     ` Carlos Maiolino
2023-04-11  8:14       ` Jan Kara
2023-04-11  8:41         ` Carlos Maiolino
2023-04-03 21:10   ` kernel test robot
2023-04-04  4:26   ` kernel test robot
2023-04-03  8:47 ` [PATCH 3/6] quota: Check presence of quota operation structures instead of ->quota_read and ->quota_write callbacks cem
2023-04-03  8:47 ` [PATCH 4/6] shmem: prepare shmem quota infrastructure cem
2023-04-04 12:34   ` Jan Kara
2023-04-04 13:48     ` Carlos Maiolino
2023-04-05 11:04       ` Jan Kara
2023-04-12  9:44       ` Carlos Maiolino
2023-04-12 10:04         ` Jan Kara
2023-04-12 11:14           ` Carlos Maiolino
2023-04-12 11:23             ` Jan Kara
2023-04-03  8:47 ` [PATCH 5/6] shmem: quota support cem
2023-04-03 14:31   ` kernel test robot
2023-04-03 18:46   ` Darrick J. Wong
2023-04-04 13:41     ` Carlos Maiolino
2023-04-04 16:45       ` Darrick J. Wong
2023-04-03 22:03   ` kernel test robot
2023-04-04  6:22   ` kernel test robot
2023-04-05 11:42   ` Jan Kara
2023-04-11  9:37     ` Carlos Maiolino [this message]
2023-04-11 13:03       ` Jan Kara
2023-04-03  8:47 ` [PATCH 6/6] Add default quota limit mount options cem
2023-04-05  8:52 ` [PATCH 0/6] shmem: Add user and group quota support for tmpfs Christian Brauner
2023-04-05 10:44   ` Carlos Maiolino
2023-04-05 13:11     ` Christian Brauner
2023-04-06  8:08       ` Carlos Maiolino
2023-04-26 10:20 [PATCH V4 " cem
2023-04-26 10:20 ` [PATCH 5/6] shmem: quota support cem
2023-07-13 13:48 [PATCH RESEND V4 0/6] shmem: Add user and group quota support for tmpfs cem
2023-07-13 13:48 ` [PATCH 5/6] shmem: quota support cem
2023-07-14  9:54   ` Christian Brauner
2023-07-14 10:40     ` Carlos Maiolino
2023-07-14 12:26     ` Carlos Maiolino
2023-07-14 13:48       ` Christian Brauner
2023-07-14 14:47         ` Carlos Maiolino
2023-07-17 11:52 [PATCH V5 0/6] shmem: Add user and group quota support for tmpfs cem
2023-07-17 11:52 ` [PATCH 5/6] shmem: quota support cem

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230411093726.ry3e6espmocvwq6f@andromeda \
    --to=cem@kernel.org \
    --cc=djwong@kernel.org \
    --cc=hughd@google.com \
    --cc=jack@suse.cz \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).