All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/9] quota updates V2
@ 2012-02-15  2:29 Christoph Hellwig
  2012-02-15  2:29 ` [PATCH 1/9] xfs: use per-filesystem dquot LRU lists Christoph Hellwig
                   ` (8 more replies)
  0 siblings, 9 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

Resend of my previous two quota series.  The changes are:

 - drop the first patch as it's now included in mainline
 - rebase one patch on top of other changes in the tree
 - make various commit messages nicer
 - include a tested-by statement for arekm, who confirms the impressive
   speedups on his real life system

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 1/9] xfs: use per-filesystem dquot LRU lists
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-15 22:10   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup Christoph Hellwig
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-per-mount-lru-2 --]
[-- Type: text/plain, Size: 11111 bytes --]

Replace the global dquot lru lists with a per-filesystem one.

Note that the shrinker isn't wired up to the per-superblock VFS shrinker
infrastructure, as doing so would cause problems due to summing up and
splitting out again the counts for inodes and dquots.  I do not believe this
is a major issue as the quota cache is not deeply intertwined with inode
and dentry caching.

Also temporarily stop tracking the system-wide count of dquots on the LRU
lists for /proc/fs/xfs/xqm, which will be added back later in the series.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c    |   37 +++++++++++++++--------------
 fs/xfs/xfs_dquot.h    |    2 -
 fs/xfs/xfs_qm.c       |   62 +++++++++++++++++++++-----------------------------
 fs/xfs/xfs_qm.h       |    7 +++--
 fs/xfs/xfs_qm_stats.c |    2 -
 5 files changed, 52 insertions(+), 58 deletions(-)

Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:18:46.000000000 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:19:02.840266068 -0800
@@ -47,7 +47,7 @@
  *     qi->qi_dqlist_lock
  *       dquot->q_qlock (xfs_dqlock() and friends)
  *         dquot->q_flush (xfs_dqflock() and friends)
- *         xfs_Gqm->qm_dqfrlist_lock
+ *         qi->qi_lru_lock
  *
  * If two dquots need to be locked the order is user before group/project,
  * otherwise by the lowest id first, see xfs_dqlock2.
@@ -69,7 +69,7 @@ void
 xfs_qm_dqdestroy(
 	xfs_dquot_t	*dqp)
 {
-	ASSERT(list_empty(&dqp->q_freelist));
+	ASSERT(list_empty(&dqp->q_lru));
 
 	mutex_destroy(&dqp->q_qlock);
 	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
@@ -497,7 +497,7 @@ xfs_qm_dqread(
 	dqp->dq_flags = type;
 	dqp->q_core.d_id = cpu_to_be32(id);
 	dqp->q_mount = mp;
-	INIT_LIST_HEAD(&dqp->q_freelist);
+	INIT_LIST_HEAD(&dqp->q_lru);
 	mutex_init(&dqp->q_qlock);
 	init_waitqueue_head(&dqp->q_pinwait);
 
@@ -843,7 +843,6 @@ restart:
 	return (0);
 }
 
-
 /*
  * Release a reference to the dquot (decrement ref-count)
  * and unlock it. If there is a group quota attached to this
@@ -869,12 +868,13 @@ recurse:
 
 	trace_xfs_dqput_free(dqp);
 
-	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-	if (list_empty(&dqp->q_freelist)) {
-		list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
-		xfs_Gqm->qm_dqfrlist_cnt++;
+	mutex_lock(&dqp->q_mount->m_quotainfo->qi_lru_lock);
+	if (list_empty(&dqp->q_lru)) {
+		list_add_tail(&dqp->q_lru,
+			      &dqp->q_mount->m_quotainfo->qi_lru_list);
+		dqp->q_mount->m_quotainfo->qi_lru_count++;
 	}
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+	mutex_unlock(&dqp->q_mount->m_quotainfo->qi_lru_lock);
 
 	/*
 	 * If we just added a udquot to the freelist, then we want to release
@@ -1125,6 +1125,7 @@ xfs_qm_dqpurge(
 {
 	struct xfs_mount	*mp = dqp->q_mount;
 	struct xfs_dqhash	*qh = dqp->q_hash;
+	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
 	xfs_dqlock(dqp);
 
@@ -1175,21 +1176,21 @@ xfs_qm_dqpurge(
 	qh->qh_version++;
 	mutex_unlock(&qh->qh_lock);
 
-	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
+	mutex_lock(&qi->qi_dqlist_lock);
 	list_del_init(&dqp->q_mplist);
-	mp->m_quotainfo->qi_dqreclaims++;
-	mp->m_quotainfo->qi_dquots--;
-	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+	qi->qi_dqreclaims++;
+	qi->qi_dquots--;
+	mutex_unlock(&qi->qi_dqlist_lock);
 
 	/*
 	 * We move dquots to the freelist as soon as their reference count
 	 * hits zero, so it really should be on the freelist here.
 	 */
-	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-	ASSERT(!list_empty(&dqp->q_freelist));
-	list_del_init(&dqp->q_freelist);
-	xfs_Gqm->qm_dqfrlist_cnt--;
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+	mutex_lock(&qi->qi_lru_lock);
+	ASSERT(!list_empty(&dqp->q_lru));
+	list_del_init(&dqp->q_lru);
+	qi->qi_lru_count--;
+	mutex_unlock(&qi->qi_lru_lock);
 
 	xfs_qm_dqdestroy(dqp);
 }
Index: xfs/fs/xfs/xfs_dquot.h
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.h	2012-02-10 09:18:14.885180611 -0800
+++ xfs/fs/xfs/xfs_dquot.h	2012-02-12 13:19:02.840266068 -0800
@@ -47,7 +47,7 @@ struct xfs_trans;
  */
 typedef struct xfs_dquot {
 	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
-	struct list_head q_freelist;	/* global free list of dquots */
+	struct list_head q_lru;		/* global free list of dquots */
 	struct list_head q_mplist;	/* mount's list of dquots */
 	struct list_head q_hashlist;	/* gloabl hash list of dquots */
 	xfs_dqhash_t	*q_hash;	/* the hashchain header */
Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:18:46.000000000 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:19:02.840266068 -0800
@@ -61,11 +61,6 @@ STATIC int	xfs_qm_init_quotainos(xfs_mou
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
 
-static struct shrinker xfs_qm_shaker = {
-	.shrink = xfs_qm_shake,
-	.seeks = DEFAULT_SEEKS,
-};
-
 /*
  * Initialize the XQM structure.
  * Note that there is not one quota manager per file system.
@@ -106,13 +101,6 @@ xfs_Gqm_init(void)
 	}
 
 	/*
-	 * Freelist of all dquots of all file systems
-	 */
-	INIT_LIST_HEAD(&xqm->qm_dqfrlist);
-	xqm->qm_dqfrlist_cnt = 0;
-	mutex_init(&xqm->qm_dqfrlist_lock);
-
-	/*
 	 * dquot zone. we register our own low-memory callback.
 	 */
 	if (!qm_dqzone) {
@@ -122,8 +110,6 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	register_shrinker(&xfs_qm_shaker);
-
 	/*
 	 * The t_dqinfo portion of transactions.
 	 */
@@ -156,12 +142,6 @@ xfs_qm_destroy(
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
 
-	unregister_shrinker(&xfs_qm_shaker);
-
-	mutex_lock(&xqm->qm_dqfrlist_lock);
-	ASSERT(list_empty(&xqm->qm_dqfrlist));
-	mutex_unlock(&xqm->qm_dqfrlist_lock);
-
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
@@ -827,6 +807,10 @@ xfs_qm_init_quotainfo(
 	mutex_init(&qinf->qi_dqlist_lock);
 	lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
 
+	INIT_LIST_HEAD(&qinf->qi_lru_list);
+	qinf->qi_lru_count = 0;
+	mutex_init(&qinf->qi_lru_lock);
+
 	qinf->qi_dqreclaims = 0;
 
 	/* mutex used to serialize quotaoffs */
@@ -894,6 +878,9 @@ xfs_qm_init_quotainfo(
 		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
 	}
 
+	qinf->qi_shrinker.shrink = xfs_qm_shake;
+	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&qinf->qi_shrinker);
 	return 0;
 }
 
@@ -913,6 +900,8 @@ xfs_qm_destroy_quotainfo(
 	ASSERT(qi != NULL);
 	ASSERT(xfs_Gqm != NULL);
 
+	unregister_shrinker(&qi->qi_shrinker);
+
 	/*
 	 * Release the reference that XQM kept, so that we know
 	 * when the XQM structure should be freed. We cannot assume
@@ -1624,6 +1613,7 @@ xfs_qm_dqreclaim_one(
 	struct list_head	*dispose_list)
 {
 	struct xfs_mount	*mp = dqp->q_mount;
+	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 	int			error;
 
 	if (!xfs_dqlock_nowait(dqp))
@@ -1639,8 +1629,8 @@ xfs_qm_dqreclaim_one(
 		trace_xfs_dqreclaim_want(dqp);
 		XQM_STATS_INC(xqmstats.xs_qm_dqwants);
 
-		list_del_init(&dqp->q_freelist);
-		xfs_Gqm->qm_dqfrlist_cnt--;
+		list_del_init(&dqp->q_lru);
+		qi->qi_lru_count--;
 		return;
 	}
 
@@ -1688,8 +1678,8 @@ xfs_qm_dqreclaim_one(
 	xfs_dqunlock(dqp);
 
 	ASSERT(dqp->q_nrefs == 0);
-	list_move_tail(&dqp->q_freelist, dispose_list);
-	xfs_Gqm->qm_dqfrlist_cnt--;
+	list_move_tail(&dqp->q_lru, dispose_list);
+	qi->qi_lru_count--;
 
 	trace_xfs_dqreclaim_done(dqp);
 	XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
@@ -1701,7 +1691,7 @@ out_busy:
 	/*
 	 * Move the dquot to the tail of the list so that we don't spin on it.
 	 */
-	list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
 
 	trace_xfs_dqreclaim_busy(dqp);
 	XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
@@ -1712,6 +1702,8 @@ xfs_qm_shake(
 	struct shrinker		*shrink,
 	struct shrink_control	*sc)
 {
+	struct xfs_quotainfo	*qi =
+		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
 	int			nr_to_scan = sc->nr_to_scan;
 	LIST_HEAD		(dispose_list);
 	struct xfs_dquot	*dqp;
@@ -1721,24 +1713,23 @@ xfs_qm_shake(
 	if (!nr_to_scan)
 		goto out;
 
-	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-	while (!list_empty(&xfs_Gqm->qm_dqfrlist)) {
+	mutex_lock(&qi->qi_lru_lock);
+	while (!list_empty(&qi->qi_lru_list)) {
 		if (nr_to_scan-- <= 0)
 			break;
-		dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot,
-				       q_freelist);
+		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
+				       q_lru);
 		xfs_qm_dqreclaim_one(dqp, &dispose_list);
 	}
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+	mutex_unlock(&qi->qi_lru_lock);
 
 	while (!list_empty(&dispose_list)) {
-		dqp = list_first_entry(&dispose_list, struct xfs_dquot,
-				       q_freelist);
-		list_del_init(&dqp->q_freelist);
+		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
+		list_del_init(&dqp->q_lru);
 		xfs_qm_dqfree_one(dqp);
 	}
 out:
-	return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure;
+	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
 }
 
 /*
Index: xfs/fs/xfs/xfs_qm.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.h	2012-02-12 13:18:46.000000000 -0800
+++ xfs/fs/xfs/xfs_qm.h	2012-02-12 13:19:02.843599401 -0800
@@ -57,9 +57,6 @@ typedef struct xfs_qm {
 	xfs_dqlist_t	*qm_usr_dqhtable;/* udquot hash table */
 	xfs_dqlist_t	*qm_grp_dqhtable;/* gdquot hash table */
 	uint		 qm_dqhashmask;	 /* # buckets in dq hashtab - 1 */
-	struct list_head qm_dqfrlist;	 /* freelist of dquots */
-	struct mutex	 qm_dqfrlist_lock;
-	int		 qm_dqfrlist_cnt;
 	atomic_t	 qm_totaldquots; /* total incore dquots */
 	uint		 qm_nrefs;	 /* file systems with quota on */
 	kmem_zone_t	*qm_dqzone;	 /* dquot mem-alloc zone */
@@ -73,6 +70,9 @@ typedef struct xfs_qm {
 typedef struct xfs_quotainfo {
 	xfs_inode_t	*qi_uquotaip;	 /* user quota inode */
 	xfs_inode_t	*qi_gquotaip;	 /* group quota inode */
+	struct list_head qi_lru_list;
+	struct mutex	 qi_lru_lock;
+	int		 qi_lru_count;
 	struct list_head qi_dqlist;	 /* all dquots in filesys */
 	struct mutex	 qi_dqlist_lock;
 	int		 qi_dquots;
@@ -93,6 +93,7 @@ typedef struct xfs_quotainfo {
 	xfs_qcnt_t	 qi_isoftlimit;	 /* default inode count soft limit */
 	xfs_qcnt_t	 qi_rtbhardlimit;/* default realtime blk hard limit */
 	xfs_qcnt_t	 qi_rtbsoftlimit;/* default realtime blk soft limit */
+	struct shrinker  qi_shrinker;
 } xfs_quotainfo_t;
 
 
Index: xfs/fs/xfs/xfs_qm_stats.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_stats.c	2012-02-12 13:18:46.000000000 -0800
+++ xfs/fs/xfs/xfs_qm_stats.c	2012-02-12 13:19:02.843599401 -0800
@@ -45,7 +45,7 @@ static int xqm_proc_show(struct seq_file
 			0,
 			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
 			0,
-			xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
+			0);
 	return 0;
 }
 

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
  2012-02-15  2:29 ` [PATCH 1/9] xfs: use per-filesystem dquot LRU lists Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-15 22:21   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 3/9] xfs: remove the per-filesystem list of dquots Christoph Hellwig
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-radix-tree --]
[-- Type: text/plain, Size: 17194 bytes --]

Replace the global hash tables for looking up in-memory dquot structures
with per-filesystem radix trees to allow scaling to a large number of
in-memory dquot structures.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c      |  175 ++++++++++--------------------------------------
 fs/xfs/xfs_dquot.h      |   12 ---
 fs/xfs/xfs_qm.c         |   95 ++------------------------
 fs/xfs/xfs_qm.h         |   19 ++---
 fs/xfs/xfs_quota_priv.h |   11 ---
 fs/xfs/xfs_trace.h      |    4 -
 6 files changed, 58 insertions(+), 258 deletions(-)

Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:19:02.840266068 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:19:18.433599690 -0800
@@ -54,9 +54,6 @@ struct xfs_qm	*xfs_Gqm;
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
 
-STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
-STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);
-
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
@@ -68,37 +65,9 @@ STATIC int	xfs_qm_shake(struct shrinker
 STATIC struct xfs_qm *
 xfs_Gqm_init(void)
 {
-	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	size_t		hsize;
-	uint		i;
-
-	/*
-	 * Initialize the dquot hash tables.
-	 */
-	udqhash = kmem_zalloc_greedy(&hsize,
-				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
-				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
-	if (!udqhash)
-		goto out;
-
-	gdqhash = kmem_zalloc_large(hsize);
-	if (!gdqhash)
-		goto out_free_udqhash;
-
-	hsize /= sizeof(xfs_dqhash_t);
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
-	xqm->qm_dqhashmask = hsize - 1;
-	xqm->qm_usr_dqhtable = udqhash;
-	xqm->qm_grp_dqhtable = gdqhash;
-	ASSERT(xqm->qm_usr_dqhtable != NULL);
-	ASSERT(xqm->qm_grp_dqhtable != NULL);
-
-	for (i = 0; i < hsize; i++) {
-		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
-		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
-	}
 
 	/*
 	 * dquot zone. we register our own low-memory callback.
@@ -123,11 +92,6 @@ xfs_Gqm_init(void)
 	atomic_set(&xqm->qm_totaldquots, 0);
 	xqm->qm_nrefs = 0;
 	return xqm;
-
- out_free_udqhash:
-	kmem_free_large(udqhash);
- out:
-	return NULL;
 }
 
 /*
@@ -137,22 +101,9 @@ STATIC void
 xfs_qm_destroy(
 	struct xfs_qm	*xqm)
 {
-	int		hsize, i;
-
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
 
-	hsize = xqm->qm_dqhashmask + 1;
-	for (i = 0; i < hsize; i++) {
-		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
-		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
-	}
-	kmem_free_large(xqm->qm_usr_dqhtable);
-	kmem_free_large(xqm->qm_grp_dqhtable);
-	xqm->qm_usr_dqhtable = NULL;
-	xqm->qm_grp_dqhtable = NULL;
-	xqm->qm_dqhashmask = 0;
-
 	kmem_free(xqm);
 }
 
@@ -763,14 +714,6 @@ xfs_qm_dqdetach(
 }
 
 /*
- * The hash chains and the mplist use the same xfs_dqhash structure as
- * their list head, but we can take the mplist qh_lock and one of the
- * hash qh_locks at the same time without any problem as they aren't
- * related.
- */
-static struct lock_class_key xfs_quota_mplist_class;
-
-/*
  * This initializes all the quota information that's kept in the
  * mount structure
  */
@@ -803,9 +746,12 @@ xfs_qm_init_quotainfo(
 		return error;
 	}
 
+	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
+	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
+	mutex_init(&qinf->qi_tree_lock);
+
 	INIT_LIST_HEAD(&qinf->qi_dqlist);
 	mutex_init(&qinf->qi_dqlist_lock);
-	lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
 
 	INIT_LIST_HEAD(&qinf->qi_lru_list);
 	qinf->qi_lru_count = 0;
@@ -925,30 +871,6 @@ xfs_qm_destroy_quotainfo(
 	mp->m_quotainfo = NULL;
 }
 
-
-
-/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
-
-/* ARGSUSED */
-STATIC void
-xfs_qm_list_init(
-	xfs_dqlist_t	*list,
-	char		*str,
-	int		n)
-{
-	mutex_init(&list->qh_lock);
-	INIT_LIST_HEAD(&list->qh_list);
-	list->qh_version = 0;
-	list->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_list_destroy(
-	xfs_dqlist_t	*list)
-{
-	mutex_destroy(&(list->qh_lock));
-}
-
 /*
  * Create an inode and return with a reference already taken, but unlocked
  * This is how we create quota inodes
@@ -1593,10 +1515,10 @@ xfs_qm_dqfree_one(
 	struct xfs_mount	*mp = dqp->q_mount;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
-	mutex_lock(&dqp->q_hash->qh_lock);
-	list_del_init(&dqp->q_hashlist);
-	dqp->q_hash->qh_version++;
-	mutex_unlock(&dqp->q_hash->qh_lock);
+	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
+	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
+			  be32_to_cpu(dqp->q_core.d_id));
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
 	mutex_lock(&qi->qi_dqlist_lock);
 	list_del_init(&dqp->q_mplist);
@@ -1634,7 +1556,6 @@ xfs_qm_dqreclaim_one(
 		return;
 	}
 
-	ASSERT(dqp->q_hash);
 	ASSERT(!list_empty(&dqp->q_mplist));
 
 	/*
Index: xfs/fs/xfs/xfs_qm.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.h	2012-02-12 13:19:02.843599401 -0800
+++ xfs/fs/xfs/xfs_qm.h	2012-02-12 13:19:18.433599690 -0800
@@ -32,12 +32,6 @@ extern kmem_zone_t	*qm_dqzone;
 extern kmem_zone_t	*qm_dqtrxzone;
 
 /*
- * Dquot hashtable constants/threshold values.
- */
-#define XFS_QM_HASHSIZE_LOW		(PAGE_SIZE / sizeof(xfs_dqhash_t))
-#define XFS_QM_HASHSIZE_HIGH		((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t))
-
-/*
  * This defines the unit of allocation of dquots.
  * Currently, it is just one file system block, and a 4K blk contains 30
  * (136 * 30 = 4080) dquots. It's probably not worth trying to make
@@ -48,15 +42,10 @@ extern kmem_zone_t	*qm_dqtrxzone;
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB	(xfs_filblks_t)1
 
-typedef xfs_dqhash_t	xfs_dqlist_t;
-
 /*
  * Quota Manager (global) structure. Lives only in core.
  */
 typedef struct xfs_qm {
-	xfs_dqlist_t	*qm_usr_dqhtable;/* udquot hash table */
-	xfs_dqlist_t	*qm_grp_dqhtable;/* gdquot hash table */
-	uint		 qm_dqhashmask;	 /* # buckets in dq hashtab - 1 */
 	atomic_t	 qm_totaldquots; /* total incore dquots */
 	uint		 qm_nrefs;	 /* file systems with quota on */
 	kmem_zone_t	*qm_dqzone;	 /* dquot mem-alloc zone */
@@ -68,6 +57,9 @@ typedef struct xfs_qm {
  * The mount structure keeps a pointer to this.
  */
 typedef struct xfs_quotainfo {
+	struct radix_tree_root qi_uquota_tree;
+	struct radix_tree_root qi_gquota_tree;
+	struct mutex qi_tree_lock;
 	xfs_inode_t	*qi_uquotaip;	 /* user quota inode */
 	xfs_inode_t	*qi_gquotaip;	 /* group quota inode */
 	struct list_head qi_lru_list;
@@ -96,6 +88,11 @@ typedef struct xfs_quotainfo {
 	struct shrinker  qi_shrinker;
 } xfs_quotainfo_t;
 
+#define XFS_DQUOT_TREE(mp, type) \
+	((type & XFS_DQ_USER) ? \
+	 &((mp)->m_quotainfo->qi_uquota_tree) : \
+	 &((mp)->m_quotainfo->qi_gquota_tree))
+
 
 extern void	xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
 extern int	xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:19:02.840266068 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:20:58.460268213 -0800
@@ -43,7 +43,7 @@
  * Lock order:
  *
  * ip->i_lock
- *   qh->qh_lock
+ *   qi->qi_tree_lock
  *     qi->qi_dqlist_lock
  *       dquot->q_qlock (xfs_dqlock() and friends)
  *         dquot->q_flush (xfs_dqflock() and friends)
@@ -602,60 +602,6 @@ error0:
 }
 
 /*
- * Lookup a dquot in the incore dquot hashtable. We keep two separate
- * hashtables for user and group dquots; and, these are global tables
- * inside the XQM, not per-filesystem tables.
- * The hash chain must be locked by caller, and it is left locked
- * on return. Returning dquot is locked.
- */
-STATIC int
-xfs_qm_dqlookup(
-	xfs_mount_t		*mp,
-	xfs_dqid_t		id,
-	xfs_dqhash_t		*qh,
-	xfs_dquot_t		**O_dqpp)
-{
-	xfs_dquot_t		*dqp;
-
-	ASSERT(mutex_is_locked(&qh->qh_lock));
-
-	/*
-	 * Traverse the hashchain looking for a match
-	 */
-	list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
-		/*
-		 * We already have the hashlock. We don't need the
-		 * dqlock to look at the id field of the dquot, since the
-		 * id can't be modified without the hashlock anyway.
-		 */
-		if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp)
-			continue;
-
-		trace_xfs_dqlookup_found(dqp);
-
-		xfs_dqlock(dqp);
-		if (dqp->dq_flags & XFS_DQ_FREEING) {
-			*O_dqpp = NULL;
-			xfs_dqunlock(dqp);
-			return -1;
-		}
-
-		dqp->q_nrefs++;
-
-		/*
-		 * move the dquot to the front of the hashchain
-		 */
-		list_move(&dqp->q_hashlist, &qh->qh_list);
-		trace_xfs_dqlookup_done(dqp);
-		*O_dqpp = dqp;
-		return 0;
-	}
-
-	*O_dqpp = NULL;
-	return 1;
-}
-
-/*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
  * a locked dquot, doing an allocation (if requested) as needed.
  * When both an inode and an id are given, the inode's id takes precedence.
@@ -672,10 +618,9 @@ xfs_qm_dqget(
 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
 {
-	xfs_dquot_t	*dqp, *dqp1;
-	xfs_dqhash_t	*h;
-	uint		version;
-	int		error;
+	struct radix_tree_root *tree = XFS_DQUOT_TREE(mp, type);
+	struct xfs_dquot	*dqp;
+	int			error;
 
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
@@ -683,7 +628,6 @@ xfs_qm_dqget(
 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
 		return (ESRCH);
 	}
-	h = XFS_DQ_HASH(mp, id, type);
 
 #ifdef DEBUG
 	if (xfs_do_dqerror) {
@@ -704,34 +648,29 @@ xfs_qm_dqget(
 #endif
 
 restart:
-	mutex_lock(&h->qh_lock);
+	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
+	dqp = radix_tree_lookup(tree, id);
+	if (dqp) {
+		xfs_dqlock(dqp);
+		if (dqp->dq_flags & XFS_DQ_FREEING) {
+			xfs_dqunlock(dqp);
+			mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+			trace_xfs_dqget_freeing(dqp);
+			delay(1);
+			goto restart;
+		}
 
-	/*
-	 * Look in the cache (hashtable).
-	 * The chain is kept locked during lookup.
-	 */
-	switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
-	case -1:
-		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
-		mutex_unlock(&h->qh_lock);
-		delay(1);
-		goto restart;
-	case 0:
+		dqp->q_nrefs++;
+		mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+
+		trace_xfs_dqget_hit(dqp);
 		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
-		/*
-		 * The dquot was found, moved to the front of the chain,
-		 * taken off the freelist if it was on it, and locked
-		 * at this point. Just unlock the hashchain and return.
-		 */
-		ASSERT(*O_dqpp);
-		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
-		mutex_unlock(&h->qh_lock);
-		trace_xfs_dqget_hit(*O_dqpp);
-		return 0;	/* success */
-	default:
-		XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
-		break;
+		*O_dqpp = dqp;
+		return 0;
 	}
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+
+	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
 
 	/*
 	 * Dquot cache miss. We don't want to keep the inode lock across
@@ -742,12 +681,6 @@ restart:
 	 */
 	if (ip)
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	/*
-	 * Save the hashchain version stamp, and unlock the chain, so that
-	 * we don't keep the lock across a disk read
-	 */
-	version = h->qh_version;
-	mutex_unlock(&h->qh_lock);
 
 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
 
@@ -757,15 +690,14 @@ restart:
 	if (error)
 		return error;
 
-	/*
-	 * Dquot lock comes after hashlock in the lock ordering
-	 */
 	if (ip) {
 		/*
 		 * A dquot could be attached to this inode by now, since
 		 * we had dropped the ilock.
 		 */
 		if (xfs_this_quota_on(mp, type)) {
+			struct xfs_dquot	*dqp1;
+
 			dqp1 = xfs_inode_dquot(ip, type);
 			if (dqp1) {
 				xfs_qm_dqdestroy(dqp);
@@ -780,46 +712,21 @@ restart:
 		}
 	}
 
-	/*
-	 * Hashlock comes after ilock in lock order
-	 */
-	mutex_lock(&h->qh_lock);
-	if (version != h->qh_version) {
-		xfs_dquot_t *tmpdqp;
+	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
+	error = -radix_tree_insert(tree, id, dqp);
+	if (unlikely(error)) {
+		WARN_ON(error != EEXIST);
+
 		/*
-		 * Now, see if somebody else put the dquot in the
-		 * hashtable before us. This can happen because we didn't
-		 * keep the hashchain lock. We don't have to worry about
-		 * lock order between the two dquots here since dqp isn't
-		 * on any findable lists yet.
+		 * Duplicate found. Just throw away the new dquot and start
+		 * over.
 		 */
-		switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
-		case 0:
-		case -1:
-			/*
-			 * Duplicate found, either in cache or on its way out.
-			 * Just throw away the new dquot and start over.
-			 */
-			if (tmpdqp)
-				xfs_qm_dqput(tmpdqp);
-			mutex_unlock(&h->qh_lock);
-			xfs_qm_dqdestroy(dqp);
-			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
-			goto restart;
-		default:
-			break;
-		}
+		mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+		trace_xfs_dqget_dup(dqp);
+		xfs_qm_dqdestroy(dqp);
+		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+		goto restart;
 	}
-
-	/*
-	 * Put the dquot at the beginning of the hash-chain and mp's list
-	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
-	 */
-	ASSERT(mutex_is_locked(&h->qh_lock));
-	dqp->q_hash = h;
-	list_add(&dqp->q_hashlist, &h->qh_list);
-	h->qh_version++;
-
 	/*
 	 * Attach this dquot to this filesystem's list of all dquots,
 	 * kept inside the mount structure in m_quotainfo field
@@ -835,7 +742,8 @@ restart:
 	list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
 	mp->m_quotainfo->qi_dquots++;
 	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-	mutex_unlock(&h->qh_lock);
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+
  dqret:
 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	trace_xfs_dqget_miss(dqp);
@@ -1124,7 +1032,6 @@ xfs_qm_dqpurge(
 	struct xfs_dquot	*dqp)
 {
 	struct xfs_mount	*mp = dqp->q_mount;
-	struct xfs_dqhash	*qh = dqp->q_hash;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
 	xfs_dqlock(dqp);
@@ -1171,10 +1078,10 @@ xfs_qm_dqpurge(
 	xfs_dqfunlock(dqp);
 	xfs_dqunlock(dqp);
 
-	mutex_lock(&qh->qh_lock);
-	list_del_init(&dqp->q_hashlist);
-	qh->qh_version++;
-	mutex_unlock(&qh->qh_lock);
+	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
+	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
+			  be32_to_cpu(dqp->q_core.d_id));
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
 	mutex_lock(&qi->qi_dqlist_lock);
 	list_del_init(&dqp->q_mplist);
Index: xfs/fs/xfs/xfs_dquot.h
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.h	2012-02-12 13:19:02.840266068 -0800
+++ xfs/fs/xfs/xfs_dquot.h	2012-02-12 13:19:18.440266355 -0800
@@ -29,16 +29,6 @@
  * when quotas are off.
  */
 
-/*
- * The hash chain headers (hash buckets)
- */
-typedef struct xfs_dqhash {
-	struct list_head  qh_list;
-	struct mutex	  qh_lock;
-	uint		  qh_version;	/* ever increasing version */
-	uint		  qh_nelems;	/* number of dquots on the list */
-} xfs_dqhash_t;
-
 struct xfs_mount;
 struct xfs_trans;
 
@@ -49,8 +39,6 @@ typedef struct xfs_dquot {
 	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
 	struct list_head q_lru;		/* global free list of dquots */
 	struct list_head q_mplist;	/* mount's list of dquots */
-	struct list_head q_hashlist;	/* gloabl hash list of dquots */
-	xfs_dqhash_t	*q_hash;	/* the hashchain header */
 	struct xfs_mount*q_mount;	/* filesystem this relates to */
 	struct xfs_trans*q_transp;	/* trans this belongs to currently */
 	uint		 q_nrefs;	/* # active refs from inodes */
Index: xfs/fs/xfs/xfs_quota_priv.h
===================================================================
--- xfs.orig/fs/xfs/xfs_quota_priv.h	2012-02-07 10:00:57.000000000 -0800
+++ xfs/fs/xfs/xfs_quota_priv.h	2012-02-12 13:19:18.440266355 -0800
@@ -24,17 +24,6 @@
  */
 #define XFS_DQITER_MAP_SIZE	10
 
-/*
- * Hash into a bucket in the dquot hash table, based on <mp, id>.
- */
-#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
-				 (__psunsigned_t)(id)) & \
-				(xfs_Gqm->qm_dqhashmask - 1))
-#define XFS_DQ_HASH(mp, id, type)   (type == XFS_DQ_USER ? \
-				     (xfs_Gqm->qm_usr_dqhtable + \
-				      XFS_DQ_HASHVAL(mp, id)) : \
-				     (xfs_Gqm->qm_grp_dqhtable + \
-				      XFS_DQ_HASHVAL(mp, id)))
 #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
 	!dqp->q_core.d_blk_hardlimit && \
 	!dqp->q_core.d_blk_softlimit && \
Index: xfs/fs/xfs/xfs_trace.h
===================================================================
--- xfs.orig/fs/xfs/xfs_trace.h	2012-02-12 13:18:57.000000000 -0800
+++ xfs/fs/xfs/xfs_trace.h	2012-02-12 13:19:18.440266355 -0800
@@ -741,10 +741,10 @@ DEFINE_DQUOT_EVENT(xfs_dqalloc);
 DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
 DEFINE_DQUOT_EVENT(xfs_dqread);
 DEFINE_DQUOT_EVENT(xfs_dqread_fail);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
 DEFINE_DQUOT_EVENT(xfs_dqget_hit);
 DEFINE_DQUOT_EVENT(xfs_dqget_miss);
+DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
+DEFINE_DQUOT_EVENT(xfs_dqget_dup);
 DEFINE_DQUOT_EVENT(xfs_dqput);
 DEFINE_DQUOT_EVENT(xfs_dqput_wait);
 DEFINE_DQUOT_EVENT(xfs_dqput_free);

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 3/9] xfs: remove the per-filesystem list of dquots
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
  2012-02-15  2:29 ` [PATCH 1/9] xfs: use per-filesystem dquot LRU lists Christoph Hellwig
  2012-02-15  2:29 ` [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-15 22:59   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 4/9] xfs: use per-CPU data for the quota statistics Christoph Hellwig
                   ` (5 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-remove-mplist --]
[-- Type: text/plain, Size: 14155 bytes --]

Instead of keeping a separate per-filesystem list of dquots we can walk
the radix tree for the two places where we need to iterate all quota
structures.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c |   35 ++----
 fs/xfs/xfs_dquot.h |    2 
 fs/xfs/xfs_qm.c    |  287 +++++++++++++++++++++++------------------------------
 fs/xfs/xfs_qm.h    |    4 
 4 files changed, 143 insertions(+), 185 deletions(-)

Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:20:58.460268213 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:22:33.326936637 -0800
@@ -44,10 +44,9 @@
  *
  * ip->i_lock
  *   qi->qi_tree_lock
- *     qi->qi_dqlist_lock
- *       dquot->q_qlock (xfs_dqlock() and friends)
- *         dquot->q_flush (xfs_dqflock() and friends)
- *         qi->qi_lru_lock
+ *     dquot->q_qlock (xfs_dqlock() and friends)
+ *       dquot->q_flush (xfs_dqflock() and friends)
+ *       qi->qi_lru_lock
  *
  * If two dquots need to be locked the order is user before group/project,
  * otherwise by the lowest id first, see xfs_dqlock2.
@@ -727,11 +726,6 @@ restart:
 		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
 		goto restart;
 	}
-	/*
-	 * Attach this dquot to this filesystem's list of all dquots,
-	 * kept inside the mount structure in m_quotainfo field
-	 */
-	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
 
 	/*
 	 * We return a locked dquot to the caller, with a reference taken
@@ -739,9 +733,7 @@ restart:
 	xfs_dqlock(dqp);
 	dqp->q_nrefs = 1;
 
-	list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
 	mp->m_quotainfo->qi_dquots++;
-	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
 	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
  dqret:
@@ -1025,16 +1017,23 @@ xfs_dqlock2(
 
 /*
  * Take a dquot out of the mount's dqlist as well as the hashlist.  This is
- * called via unmount as well as quotaoff, and the purge will always succeed.
+ * called via unmount as well as quotaoff.
  */
-void
+int
 xfs_qm_dqpurge(
-	struct xfs_dquot	*dqp)
+	struct xfs_dquot	*dqp,
+	int			flags)
 {
 	struct xfs_mount	*mp = dqp->q_mount;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
 	xfs_dqlock(dqp);
+	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
+		xfs_dqunlock(dqp);
+		return EAGAIN;
+	}
+
+	dqp->dq_flags |= XFS_DQ_FREEING;
 
 	/*
 	 * If we're turning off quotas, we have to make sure that, for
@@ -1078,16 +1077,9 @@ xfs_qm_dqpurge(
 	xfs_dqfunlock(dqp);
 	xfs_dqunlock(dqp);
 
-	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
 	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
 			  be32_to_cpu(dqp->q_core.d_id));
-	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
-
-	mutex_lock(&qi->qi_dqlist_lock);
-	list_del_init(&dqp->q_mplist);
-	qi->qi_dqreclaims++;
 	qi->qi_dquots--;
-	mutex_unlock(&qi->qi_dqlist_lock);
 
 	/*
 	 * We move dquots to the freelist as soon as their reference count
@@ -1100,6 +1092,7 @@ xfs_qm_dqpurge(
 	mutex_unlock(&qi->qi_lru_lock);
 
 	xfs_qm_dqdestroy(dqp);
+	return 0;
 }
 
 /*
Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:19:18.433599690 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:22:33.326936637 -0800
@@ -308,172 +308,157 @@ xfs_qm_unmount_quotas(
 }
 
 /*
- * Flush all dquots of the given file system to disk. The dquots are
- * _not_ purged from memory here, just their data written to disk.
+ * The quota lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade off between
+ * lookup reduction and stack usage.
  */
+#define XFS_DQ_LOOKUP_BATCH	32
+
 STATIC int
-xfs_qm_dqflush_all(
-	struct xfs_mount	*mp)
-{
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	int			recl;
-	struct xfs_dquot	*dqp;
-	int			error;
+xfs_qm_dquot_walk(
+	struct xfs_mount	*mp,
+	int			type,
+	int			(*execute)(struct xfs_dquot *dqp, int flags),
+	int			flags)
+{
+	struct radix_tree_root	*tree = XFS_DQUOT_TREE(mp, type);
+	uint32_t		first_index;
+	int			last_error = 0;
+	int			skipped;
+	int			nr_found;
+
+restart:
+	skipped = 0;
+	first_index = 0;
+	nr_found = 0;
 
-	if (!q)
-		return 0;
-again:
-	mutex_lock(&q->qi_dqlist_lock);
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		xfs_dqlock(dqp);
-		if ((dqp->dq_flags & XFS_DQ_FREEING) ||
-		    !XFS_DQ_IS_DIRTY(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
+	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
+	do {
+		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
+		int		error = 0;
+		int		i;
+
+		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
+					first_index, XFS_DQ_LOOKUP_BATCH);
+		if (!nr_found)
+			break;
 
-		/* XXX a sentinel would be better */
-		recl = q->qi_dqreclaims;
-		if (!xfs_dqflock_nowait(dqp)) {
-			/*
-			 * If we can't grab the flush lock then check
-			 * to see if the dquot has been flushed delayed
-			 * write.  If so, grab its buffer and send it
-			 * out immediately.  We'll be able to acquire
-			 * the flush lock when the I/O completes.
-			 */
-			xfs_dqflock_pushbuf_wait(dqp);
+		for (i = 0; i < nr_found; i++) {
+			struct xfs_dquot *dqp = batch[i];
+
+			first_index = be32_to_cpu(dqp->q_core.d_id) + 1;
+
+			error = execute(batch[i], flags);
+			if (error == EAGAIN) {
+				skipped++;
+				continue;
+			}
+			if (error && last_error != EFSCORRUPTED)
+				last_error = error;
+		}
+		/* bail out if the filesystem is corrupted.  */
+		if (error == EFSCORRUPTED) {
+			skipped = 0;
+			break;
 		}
-		/*
-		 * Let go of the mplist lock. We don't want to hold it
-		 * across a disk write.
-		 */
-		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, 0);
-		xfs_dqunlock(dqp);
-		if (error)
-			return error;
 
-		mutex_lock(&q->qi_dqlist_lock);
-		if (recl != q->qi_dqreclaims) {
-			mutex_unlock(&q->qi_dqlist_lock);
-			/* XXX restart limit */
-			goto again;
+		if (need_resched()) {
+			mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+			cond_resched();
+			mutex_lock(&mp->m_quotainfo->qi_tree_lock);
 		}
+	} while (nr_found);
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
+
+	if (skipped) {
+		delay(1);
+		goto restart;
 	}
 
-	mutex_unlock(&q->qi_dqlist_lock);
-	/* return ! busy */
-	return 0;
+	return last_error;
 }
 
-/*
- * Release the group dquot pointers the user dquots may be
- * carrying around as a hint. mplist is locked on entry and exit.
- */
-STATIC void
-xfs_qm_detach_gdquots(
-	struct xfs_mount	*mp)
+STATIC int
+xfs_qm_flush_one(
+	struct xfs_dquot	*dqp,
+	int			flags)
 {
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	struct xfs_dquot	*dqp, *gdqp;
+	int			error = 0;
 
- again:
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		xfs_dqlock(dqp);
-		if (dqp->dq_flags & XFS_DQ_FREEING) {
-			xfs_dqunlock(dqp);
-			mutex_unlock(&q->qi_dqlist_lock);
-			delay(1);
-			mutex_lock(&q->qi_dqlist_lock);
-			goto again;
-		}
+	xfs_dqlock(dqp);
+	if (dqp->dq_flags & XFS_DQ_FREEING)
+		goto out_unlock;
+	if (!XFS_DQ_IS_DIRTY(dqp))
+		goto out_unlock;
 
-		gdqp = dqp->q_gdquot;
-		if (gdqp)
-			dqp->q_gdquot = NULL;
-		xfs_dqunlock(dqp);
+	if (!xfs_dqflock_nowait(dqp))
+		xfs_dqflock_pushbuf_wait(dqp);
 
-		if (gdqp)
-			xfs_qm_dqrele(gdqp);
-	}
+	error = xfs_qm_dqflush(dqp, flags);
+
+out_unlock:
+	xfs_dqunlock(dqp);
+	return error;
 }
 
 /*
- * Go through all the incore dquots of this file system and take them
- * off the mplist and hashlist, if the dquot type matches the dqtype
- * parameter. This is used when turning off quota accounting for
- * users and/or groups, as well as when the filesystem is unmounting.
+ * Release the group dquot pointer the user dquot may be carrying around
+ * as a hint.
  */
 STATIC int
-xfs_qm_dqpurge_int(
+xfs_qm_detach_gdquot(
+	struct xfs_dquot	*dqp,
+	int			flags)
+{
+	struct xfs_dquot	*gdqp;
+
+	xfs_dqlock(dqp);
+	/* XXX(hch): should we bother with freeing dquots here? */
+	if (dqp->dq_flags & XFS_DQ_FREEING) {
+		xfs_dqunlock(dqp);
+		return 0;
+	}
+	gdqp = dqp->q_gdquot;
+	if (gdqp) {
+		xfs_dqlock(gdqp);
+		dqp->q_gdquot = NULL;
+	}
+	xfs_dqunlock(dqp);
+	if (gdqp)
+		xfs_qm_dqput(gdqp);
+	return 0;
+}
+
+/*
+ * Purge the dquot cache.
+ *
+ * None of the dquots should really be busy at this point.
+ */
+int
+xfs_qm_dqpurge_all(
 	struct xfs_mount	*mp,
 	uint			flags)
 {
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	struct xfs_dquot	*dqp, *n;
-	uint			dqtype;
-	int			nmisses = 0;
-	LIST_HEAD		(dispose_list);
+	int			error = 0;
 
-	if (!q)
+	if (!mp->m_quotainfo)
 		return 0;
 
-	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
-	dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
-	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
-
-	mutex_lock(&q->qi_dqlist_lock);
-
 	/*
 	 * In the first pass through all incore dquots of this filesystem,
 	 * we release the group dquot pointers the user dquots may be
 	 * carrying around as a hint. We need to do this irrespective of
 	 * what's being turned off.
 	 */
-	xfs_qm_detach_gdquots(mp);
-
-	/*
-	 * Try to get rid of all of the unwanted dquots.
-	 */
-	list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
-		xfs_dqlock(dqp);
-		if ((dqp->dq_flags & dqtype) != 0 &&
-		    !(dqp->dq_flags & XFS_DQ_FREEING)) {
-			if (dqp->q_nrefs == 0) {
-				dqp->dq_flags |= XFS_DQ_FREEING;
-				list_move_tail(&dqp->q_mplist, &dispose_list);
-			} else
-				nmisses++;
-		}
-		xfs_dqunlock(dqp);
-	}
-	mutex_unlock(&q->qi_dqlist_lock);
-
-	list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist)
-		xfs_qm_dqpurge(dqp);
-
-	return nmisses;
-}
-
-int
-xfs_qm_dqpurge_all(
-	xfs_mount_t	*mp,
-	uint		flags)
-{
-	int		ndquots;
+	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_detach_gdquot, 0);
 
-	/*
-	 * Purge the dquot cache.
-	 * None of the dquots should really be busy at this point.
-	 */
-	if (mp->m_quotainfo) {
-		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
-			delay(ndquots * 10);
-		}
-	}
-	return 0;
+	if (!error && (flags & XFS_QMOPT_UQUOTA))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, 0);
+	if (!error && (flags & XFS_QMOPT_GQUOTA))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, 0);
+	if (!error && (flags & XFS_QMOPT_PQUOTA))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, 0);
+	return error;
 }
 
 STATIC int
@@ -750,15 +735,10 @@ xfs_qm_init_quotainfo(
 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 	mutex_init(&qinf->qi_tree_lock);
 
-	INIT_LIST_HEAD(&qinf->qi_dqlist);
-	mutex_init(&qinf->qi_dqlist_lock);
-
 	INIT_LIST_HEAD(&qinf->qi_lru_list);
 	qinf->qi_lru_count = 0;
 	mutex_init(&qinf->qi_lru_lock);
 
-	qinf->qi_dqreclaims = 0;
-
 	/* mutex used to serialize quotaoffs */
 	mutex_init(&qinf->qi_quotaofflock);
 
@@ -855,9 +835,6 @@ xfs_qm_destroy_quotainfo(
 	 */
 	xfs_qm_rele_quotafs_ref(mp);
 
-	ASSERT(list_empty(&qi->qi_dqlist));
-	mutex_destroy(&qi->qi_dqlist_lock);
-
 	if (qi->qi_uquotaip) {
 		IRELE(qi->qi_uquotaip);
 		qi->qi_uquotaip = NULL; /* paranoia */
@@ -1330,12 +1307,6 @@ xfs_qm_quotacheck(
 	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-	/*
-	 * There should be no cached dquots. The (simplistic) quotacheck
-	 * algorithm doesn't like that.
-	 */
-	ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
-
 	xfs_notice(mp, "Quotacheck needed: Please wait.");
 
 	/*
@@ -1374,12 +1345,15 @@ xfs_qm_quotacheck(
 	} while (!done);
 
 	/*
-	 * We've made all the changes that we need to make incore.
-	 * Flush them down to disk buffers if everything was updated
-	 * successfully.
+	 * We've made all the changes that we need to make incore.  Flush them
+	 * down to disk buffers if everything was updated successfully.
 	 */
-	if (!error)
-		error = xfs_qm_dqflush_all(mp);
+	if (!error && XFS_IS_UQUOTA_ON(mp))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, 0);
+	if (!error && XFS_IS_GQUOTA_ON(mp))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one, 0);
+	if (!error && XFS_IS_PQUOTA_ON(mp))
+		error = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one, 0);
 
 	/*
 	 * We can get this error if we couldn't do a dquot allocation inside
@@ -1518,13 +1492,8 @@ xfs_qm_dqfree_one(
 	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
 	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
 			  be32_to_cpu(dqp->q_core.d_id));
-	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
-
-	mutex_lock(&qi->qi_dqlist_lock);
-	list_del_init(&dqp->q_mplist);
 	qi->qi_dquots--;
-	qi->qi_dqreclaims++;
-	mutex_unlock(&qi->qi_dqlist_lock);
+	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
 	xfs_qm_dqdestroy(dqp);
 }
Index: xfs/fs/xfs/xfs_qm.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.h	2012-02-12 13:19:18.433599690 -0800
+++ xfs/fs/xfs/xfs_qm.h	2012-02-12 13:22:33.330269971 -0800
@@ -65,11 +65,7 @@ typedef struct xfs_quotainfo {
 	struct list_head qi_lru_list;
 	struct mutex	 qi_lru_lock;
 	int		 qi_lru_count;
-	struct list_head qi_dqlist;	 /* all dquots in filesys */
-	struct mutex	 qi_dqlist_lock;
 	int		 qi_dquots;
-	int		 qi_dqreclaims;	 /* a change here indicates
-					    a removal in the dqlist */
 	time_t		 qi_btimelimit;	 /* limit for blks timer */
 	time_t		 qi_itimelimit;	 /* limit for inodes timer */
 	time_t		 qi_rtbtimelimit;/* limit for rt blks timer */
Index: xfs/fs/xfs/xfs_dquot.h
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.h	2012-02-12 13:19:18.440266355 -0800
+++ xfs/fs/xfs/xfs_dquot.h	2012-02-12 13:22:33.330269971 -0800
@@ -143,7 +143,7 @@ extern int		xfs_qm_dqread(struct xfs_mou
 					uint, struct xfs_dquot	**);
 extern void		xfs_qm_dqdestroy(xfs_dquot_t *);
 extern int		xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern void		xfs_qm_dqpurge(xfs_dquot_t *);
+extern int		xfs_qm_dqpurge(xfs_dquot_t *, int);
 extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
 extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
 					xfs_disk_dquot_t *);

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 4/9] xfs: use per-CPU data for the quota statistics
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 3/9] xfs: remove the per-filesystem list of dquots Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-15 23:59   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 5/9] xfs: use per-cpu stats for the total dquot numbers Christoph Hellwig
                   ` (4 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-percpu-stats --]
[-- Type: text/plain, Size: 4598 bytes --]

Use the same per-CPU scheme used in the main XFS statistics, as well as
the VFS inode and dcache statistics for the quota code.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c    |    6 +++---
 fs/xfs/xfs_qm.c       |    6 +++---
 fs/xfs/xfs_qm_stats.c |   28 +++++++++++++++++-----------
 fs/xfs/xfs_qm_stats.h |   11 ++++++++---
 4 files changed, 31 insertions(+), 20 deletions(-)

Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:22:33.326936637 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:22:45.036936854 -0800
@@ -663,13 +663,13 @@ restart:
 		mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
 		trace_xfs_dqget_hit(dqp);
-		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
+		XQM_STATS_INC(xs_qm_dqcachehits);
 		*O_dqpp = dqp;
 		return 0;
 	}
 	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 
-	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+	XQM_STATS_INC(xs_qm_dqcachemisses);
 
 	/*
 	 * Dquot cache miss. We don't want to keep the inode lock across
@@ -723,7 +723,7 @@ restart:
 		mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
 		trace_xfs_dqget_dup(dqp);
 		xfs_qm_dqdestroy(dqp);
-		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+		XQM_STATS_INC(xs_qm_dquot_dups);
 		goto restart;
 	}
 
Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:22:33.326936637 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:22:45.036936854 -0800
@@ -1518,7 +1518,7 @@ xfs_qm_dqreclaim_one(
 		xfs_dqunlock(dqp);
 
 		trace_xfs_dqreclaim_want(dqp);
-		XQM_STATS_INC(xqmstats.xs_qm_dqwants);
+		XQM_STATS_INC(xs_qm_dqwants);
 
 		list_del_init(&dqp->q_lru);
 		qi->qi_lru_count--;
@@ -1572,7 +1572,7 @@ xfs_qm_dqreclaim_one(
 	qi->qi_lru_count--;
 
 	trace_xfs_dqreclaim_done(dqp);
-	XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
+	XQM_STATS_INC(xs_qm_dqreclaims);
 	return;
 
 out_busy:
@@ -1584,7 +1584,7 @@ out_busy:
 	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
 
 	trace_xfs_dqreclaim_busy(dqp);
-	XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
+	XQM_STATS_INC(xs_qm_dqreclaim_misses);
 }
 
 STATIC int
Index: xfs/fs/xfs/xfs_qm_stats.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_stats.c	2012-02-12 13:19:02.843599401 -0800
+++ xfs/fs/xfs/xfs_qm_stats.c	2012-02-12 13:22:45.040270187 -0800
@@ -36,7 +36,16 @@
 #include "xfs_buf_item.h"
 #include "xfs_qm.h"
 
-struct xqmstats xqmstats;
+DEFINE_PER_CPU(struct xqmstats, xqmstats);
+
+static int xqmstats_sum(int idx)
+{
+	int val = 0, cpu;
+
+	for_each_possible_cpu(cpu)
+		val += *(((__u32 *)&per_cpu(xqmstats, cpu) + idx));
+	return max(val, 0);
+}
 
 static int xqm_proc_show(struct seq_file *m, void *v)
 {
@@ -64,16 +73,13 @@ static const struct file_operations xqm_
 
 static int xqmstat_proc_show(struct seq_file *m, void *v)
 {
-	/* quota performance statistics */
-	seq_printf(m, "qm %u %u %u %u %u %u %u %u\n",
-			xqmstats.xs_qm_dqreclaims,
-			xqmstats.xs_qm_dqreclaim_misses,
-			xqmstats.xs_qm_dquot_dups,
-			xqmstats.xs_qm_dqcachemisses,
-			xqmstats.xs_qm_dqcachehits,
-			xqmstats.xs_qm_dqwants,
-			xqmstats.xs_qm_dqshake_reclaims,
-			xqmstats.xs_qm_dqinact_reclaims);
+	int j;
+
+	seq_printf(m, "qm");
+	for (j = 0; j < XQMSTAT_END_XQMSTAT; j++)
+		seq_printf(m, " %u", xqmstats_sum(j));
+	seq_putc(m, '\n');
+
 	return 0;
 }
 
Index: xfs/fs/xfs/xfs_qm_stats.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_stats.h	2012-02-07 10:00:57.291250941 -0800
+++ xfs/fs/xfs/xfs_qm_stats.h	2012-02-12 13:22:45.040270187 -0800
@@ -32,18 +32,23 @@ struct xqmstats {
 	__uint32_t		xs_qm_dqwants;
 	__uint32_t		xs_qm_dqshake_reclaims;
 	__uint32_t		xs_qm_dqinact_reclaims;
+#define XQMSTAT_END_XQMSTAT	8
 };
 
-extern struct xqmstats xqmstats;
+DECLARE_PER_CPU(struct xqmstats, xqmstats);
 
-# define XQM_STATS_INC(count)	( (count)++ )
+/*
+ * We don't disable preempt, not too worried about poking the
+ * wrong CPU's stat for now (also aggregated before reporting).
+ */
+# define XQM_STATS_INC(v)	(per_cpu(xqmstats, current_cpu()).v++)
 
 extern void xfs_qm_init_procfs(void);
 extern void xfs_qm_cleanup_procfs(void);
 
 #else
 
-# define XQM_STATS_INC(count)	do { } while (0)
+# define XQM_STATS_INC(v)	do { } while (0)
 
 static inline void xfs_qm_init_procfs(void) { };
 static inline void xfs_qm_cleanup_procfs(void) { };

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 5/9] xfs: use per-cpu stats for the total dquot numbers
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 4/9] xfs: use per-CPU data for the quota statistics Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-16  0:02   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 6/9] xfs: remove the global xfs_Gqm structure Christoph Hellwig
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-percpu-stats-2 --]
[-- Type: text/plain, Size: 4901 bytes --]

Switch the total number of dquots counter over to use the per-cpu stats
implementation, and reintroduce the number of unused dquots counter
dropped earlier in the series.

Btw, I wonder if we should simply add these counters to /proc/fs/xfs/xqmstat
instead of keeping the odd format and mostly superfluous /proc/fs/xfs/xqm
around.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c    |    7 ++++---
 fs/xfs/xfs_qm.c       |    3 ++-
 fs/xfs/xfs_qm.h       |    1 -
 fs/xfs/xfs_qm_stats.c |    4 ++--
 fs/xfs/xfs_qm_stats.h |    5 +++++
 5 files changed, 13 insertions(+), 7 deletions(-)

Index: xfs/fs/xfs/xfs_qm_stats.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_stats.c	2012-02-12 13:22:45.040270187 -0800
+++ xfs/fs/xfs/xfs_qm_stats.c	2012-02-12 13:22:49.553603603 -0800
@@ -52,9 +52,9 @@ static int xqm_proc_show(struct seq_file
 	/* maximum; incore; ratio free to inuse; freelist */
 	seq_printf(m, "%d\t%d\t%d\t%u\n",
 			0,
-			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
+			xqmstats_sum(XQMSTAT_END_XQMSTAT),
 			0,
-			0);
+			xqmstats_sum(XQMSTAT_END_XQMSTAT + 1));
 	return 0;
 }
 
Index: xfs/fs/xfs/xfs_qm_stats.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_stats.h	2012-02-12 13:22:45.040270187 -0800
+++ xfs/fs/xfs/xfs_qm_stats.h	2012-02-12 13:22:49.553603603 -0800
@@ -33,6 +33,9 @@ struct xqmstats {
 	__uint32_t		xs_qm_dqshake_reclaims;
 	__uint32_t		xs_qm_dqinact_reclaims;
 #define XQMSTAT_END_XQMSTAT	8
+	__uint32_t		xs_qm_dquots;
+	__uint32_t		xs_qm_dquots_unused;
+#define XQMSTAT_END_XQM		(XQMSTAT_END_XQMSTAT + 2)
 };
 
 DECLARE_PER_CPU(struct xqmstats, xqmstats);
@@ -42,6 +45,7 @@ DECLARE_PER_CPU(struct xqmstats, xqmstat
  * wrong CPU's stat for now (also aggregated before reporting).
  */
 # define XQM_STATS_INC(v)	(per_cpu(xqmstats, current_cpu()).v++)
+# define XQM_STATS_DEC(v)	(per_cpu(xqmstats, current_cpu()).v--)
 
 extern void xfs_qm_init_procfs(void);
 extern void xfs_qm_cleanup_procfs(void);
@@ -49,6 +53,7 @@ extern void xfs_qm_cleanup_procfs(void);
 #else
 
 # define XQM_STATS_INC(v)	do { } while (0)
+# define XQM_STATS_DEC(v)	do { } while (0)
 
 static inline void xfs_qm_init_procfs(void) { };
 static inline void xfs_qm_cleanup_procfs(void) { };
Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:22:45.036936854 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:22:49.553603603 -0800
@@ -72,8 +72,7 @@ xfs_qm_dqdestroy(
 
 	mutex_destroy(&dqp->q_qlock);
 	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
-
-	atomic_dec(&xfs_Gqm->qm_totaldquots);
+	XQM_STATS_DEC(xs_qm_dquots);
 }
 
 /*
@@ -515,7 +514,7 @@ xfs_qm_dqread(
 	if (!(type & XFS_DQ_USER))
 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
 
-	atomic_inc(&xfs_Gqm->qm_totaldquots);
+	XQM_STATS_INC(xs_qm_dquots);
 
 	trace_xfs_dqread(dqp);
 
@@ -773,6 +772,7 @@ recurse:
 		list_add_tail(&dqp->q_lru,
 			      &dqp->q_mount->m_quotainfo->qi_lru_list);
 		dqp->q_mount->m_quotainfo->qi_lru_count++;
+		XQM_STATS_INC(xs_qm_dquots_unused);
 	}
 	mutex_unlock(&dqp->q_mount->m_quotainfo->qi_lru_lock);
 
@@ -1091,6 +1091,7 @@ xfs_qm_dqpurge(
 	qi->qi_lru_count--;
 	mutex_unlock(&qi->qi_lru_lock);
 
+	XQM_STATS_DEC(xs_qm_dquots_unused);
 	xfs_qm_dqdestroy(dqp);
 	return 0;
 }
Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:22:45.036936854 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:22:49.556936937 -0800
@@ -89,7 +89,6 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqtrxzone = qm_dqtrxzone;
 
-	atomic_set(&xqm->qm_totaldquots, 0);
 	xqm->qm_nrefs = 0;
 	return xqm;
 }
@@ -1522,6 +1521,7 @@ xfs_qm_dqreclaim_one(
 
 		list_del_init(&dqp->q_lru);
 		qi->qi_lru_count--;
+		XQM_STATS_DEC(xs_qm_dquots_unused);
 		return;
 	}
 
@@ -1570,6 +1570,7 @@ xfs_qm_dqreclaim_one(
 	ASSERT(dqp->q_nrefs == 0);
 	list_move_tail(&dqp->q_lru, dispose_list);
 	qi->qi_lru_count--;
+	XQM_STATS_DEC(xs_qm_dquots_unused);
 
 	trace_xfs_dqreclaim_done(dqp);
 	XQM_STATS_INC(xs_qm_dqreclaims);
Index: xfs/fs/xfs/xfs_qm.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.h	2012-02-12 13:22:33.330269971 -0800
+++ xfs/fs/xfs/xfs_qm.h	2012-02-12 13:22:49.556936937 -0800
@@ -46,7 +46,6 @@ extern kmem_zone_t	*qm_dqtrxzone;
  * Quota Manager (global) structure. Lives only in core.
  */
 typedef struct xfs_qm {
-	atomic_t	 qm_totaldquots; /* total incore dquots */
 	uint		 qm_nrefs;	 /* file systems with quota on */
 	kmem_zone_t	*qm_dqzone;	 /* dquot mem-alloc zone */
 	kmem_zone_t	*qm_dqtrxzone;	 /* t_dqinfo of transactions */

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 6/9] xfs: remove the global xfs_Gqm structure
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (4 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 5/9] xfs: use per-cpu stats for the total dquot numbers Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-16  0:07   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota Christoph Hellwig
                   ` (2 subsequent siblings)
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-kill-xfs-qm --]
[-- Type: text/plain, Size: 10414 bytes --]

Initialize the slab caches for the quota code as soon as XFS is loaded, and
remove the now unused overhead of a reference counted global structure.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 fs/xfs/xfs_dquot.c       |   38 ++++++++++++
 fs/xfs/xfs_qm.c          |  138 -----------------------------------------------
 fs/xfs/xfs_qm.h          |   15 -----
 fs/xfs/xfs_qm_bhv.c      |   18 ------
 fs/xfs/xfs_super.c       |   10 ++-
 fs/xfs/xfs_super.h       |    8 +-
 fs/xfs/xfs_trans_dquot.c |    4 -
 7 files changed, 49 insertions(+), 182 deletions(-)

Index: xfs/fs/xfs/xfs_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_dquot.c	2012-02-12 13:22:49.553603603 -0800
+++ xfs/fs/xfs/xfs_dquot.c	2012-02-12 13:22:51.386936971 -0800
@@ -59,6 +59,9 @@ int xfs_dqreq_num;
 int xfs_dqerror_mod = 33;
 #endif
 
+struct kmem_zone		*xfs_qm_dqtrxzone;
+static struct kmem_zone		*xfs_qm_dqzone;
+
 static struct lock_class_key xfs_dquot_other_class;
 
 /*
@@ -71,7 +74,7 @@ xfs_qm_dqdestroy(
 	ASSERT(list_empty(&dqp->q_lru));
 
 	mutex_destroy(&dqp->q_qlock);
-	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
+	kmem_zone_free(xfs_qm_dqzone, dqp);
 	XQM_STATS_DEC(xs_qm_dquots);
 }
 
@@ -490,7 +493,7 @@ xfs_qm_dqread(
 	int			cancelflags = 0;
 
 
-	dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
+	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
 
 	dqp->dq_flags = type;
 	dqp->q_core.d_id = cpu_to_be32(id);
@@ -1128,3 +1131,34 @@ xfs_dqflock_pushbuf_wait(
 out_lock:
 	xfs_dqflock(dqp);
 }
+
+int __init
+xfs_qm_init(void)
+{
+	xfs_qm_dqzone =
+		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
+	if (!xfs_qm_dqzone)
+		goto out;
+
+	xfs_qm_dqtrxzone =
+		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
+	if (!xfs_qm_dqtrxzone)
+		goto out_free_dqzone;
+
+	xfs_qm_init_procfs();
+	return 0;
+
+out_free_dqzone:
+	kmem_zone_destroy(xfs_qm_dqzone);
+out:
+	return -ENOMEM;
+}
+
+void __exit
+xfs_qm_exit(void)
+{
+	xfs_qm_cleanup_procfs();
+
+	kmem_zone_destroy(xfs_qm_dqtrxzone);
+	kmem_zone_destroy(xfs_qm_dqzone);
+}
Index: xfs/fs/xfs/xfs_qm.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.c	2012-02-12 13:22:49.556936937 -0800
+++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:22:51.390270305 -0800
@@ -42,133 +42,11 @@
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 
-/*
- * The global quota manager. There is only one of these for the entire
- * system, _not_ one per file system. XQM keeps track of the overall
- * quota functionality, including maintaining the freelist and hash
- * tables of dquots.
- */
-struct mutex	xfs_Gqm_lock;
-struct xfs_qm	*xfs_Gqm;
-
-kmem_zone_t	*qm_dqzone;
-kmem_zone_t	*qm_dqtrxzone;
-
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
 
 /*
- * Initialize the XQM structure.
- * Note that there is not one quota manager per file system.
- */
-STATIC struct xfs_qm *
-xfs_Gqm_init(void)
-{
-	xfs_qm_t	*xqm;
-
-	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
-
-	/*
-	 * dquot zone. we register our own low-memory callback.
-	 */
-	if (!qm_dqzone) {
-		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
-						"xfs_dquots");
-		qm_dqzone = xqm->qm_dqzone;
-	} else
-		xqm->qm_dqzone = qm_dqzone;
-
-	/*
-	 * The t_dqinfo portion of transactions.
-	 */
-	if (!qm_dqtrxzone) {
-		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
-						   "xfs_dqtrx");
-		qm_dqtrxzone = xqm->qm_dqtrxzone;
-	} else
-		xqm->qm_dqtrxzone = qm_dqtrxzone;
-
-	xqm->qm_nrefs = 0;
-	return xqm;
-}
-
-/*
- * Destroy the global quota manager when its reference count goes to zero.
- */
-STATIC void
-xfs_qm_destroy(
-	struct xfs_qm	*xqm)
-{
-	ASSERT(xqm != NULL);
-	ASSERT(xqm->qm_nrefs == 0);
-
-	kmem_free(xqm);
-}
-
-/*
- * Called at mount time to let XQM know that another file system is
- * starting quotas. This isn't crucial information as the individual mount
- * structures are pretty independent, but it helps the XQM keep a
- * global view of what's going on.
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_hold_quotafs_ref(
-	struct xfs_mount *mp)
-{
-	/*
-	 * Need to lock the xfs_Gqm structure for things like this. For example,
-	 * the structure could disappear between the entry to this routine and
-	 * a HOLD operation if not locked.
-	 */
-	mutex_lock(&xfs_Gqm_lock);
-
-	if (!xfs_Gqm) {
-		xfs_Gqm = xfs_Gqm_init();
-		if (!xfs_Gqm) {
-			mutex_unlock(&xfs_Gqm_lock);
-			return ENOMEM;
-		}
-	}
-
-	/*
-	 * We can keep a list of all filesystems with quotas mounted for
-	 * debugging and statistical purposes, but ...
-	 * Just take a reference and get out.
-	 */
-	xfs_Gqm->qm_nrefs++;
-	mutex_unlock(&xfs_Gqm_lock);
-
-	return 0;
-}
-
-
-/*
- * Release the reference that a filesystem took at mount time,
- * so that we know when we need to destroy the entire quota manager.
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_rele_quotafs_ref(
-	struct xfs_mount *mp)
-{
-	ASSERT(xfs_Gqm);
-	ASSERT(xfs_Gqm->qm_nrefs > 0);
-
-	/*
-	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
-	 * be restarted.
-	 */
-	mutex_lock(&xfs_Gqm_lock);
-	if (--xfs_Gqm->qm_nrefs == 0) {
-		xfs_qm_destroy(xfs_Gqm);
-		xfs_Gqm = NULL;
-	}
-	mutex_unlock(&xfs_Gqm_lock);
-}
-
-/*
  * Just destroy the quotainfo structure.
  */
 void
@@ -711,13 +589,6 @@ xfs_qm_init_quotainfo(
 
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-	/*
-	 * Tell XQM that we exist as soon as possible.
-	 */
-	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
-		return error;
-	}
-
 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
 	/*
@@ -823,17 +694,9 @@ xfs_qm_destroy_quotainfo(
 
 	qi = mp->m_quotainfo;
 	ASSERT(qi != NULL);
-	ASSERT(xfs_Gqm != NULL);
 
 	unregister_shrinker(&qi->qi_shrinker);
 
-	/*
-	 * Release the reference that XQM kept, so that we know
-	 * when the XQM structure should be freed. We cannot assume
-	 * that xfs_Gqm is non-null after this point.
-	 */
-	xfs_qm_rele_quotafs_ref(mp);
-
 	if (qi->qi_uquotaip) {
 		IRELE(qi->qi_uquotaip);
 		qi->qi_uquotaip = NULL; /* paranoia */
@@ -1392,7 +1255,6 @@ xfs_qm_quotacheck(
 		 * We must turn off quotas.
 		 */
 		ASSERT(mp->m_quotainfo != NULL);
-		ASSERT(xfs_Gqm != NULL);
 		xfs_qm_destroy_quotainfo(mp);
 		if (xfs_mount_reset_sbqflags(mp)) {
 			xfs_warn(mp,
Index: xfs/fs/xfs/xfs_qm.h
===================================================================
--- xfs.orig/fs/xfs/xfs_qm.h	2012-02-12 13:22:49.556936937 -0800
+++ xfs/fs/xfs/xfs_qm.h	2012-02-12 13:22:51.390270305 -0800
@@ -23,13 +23,9 @@
 #include "xfs_quota_priv.h"
 #include "xfs_qm_stats.h"
 
-struct xfs_qm;
 struct xfs_inode;
 
-extern struct mutex	xfs_Gqm_lock;
-extern struct xfs_qm	*xfs_Gqm;
-extern kmem_zone_t	*qm_dqzone;
-extern kmem_zone_t	*qm_dqtrxzone;
+extern struct kmem_zone	*xfs_qm_dqtrxzone;
 
 /*
  * This defines the unit of allocation of dquots.
@@ -43,15 +39,6 @@ extern kmem_zone_t	*qm_dqtrxzone;
 #define XFS_DQUOT_CLUSTER_SIZE_FSB	(xfs_filblks_t)1
 
 /*
- * Quota Manager (global) structure. Lives only in core.
- */
-typedef struct xfs_qm {
-	uint		 qm_nrefs;	 /* file systems with quota on */
-	kmem_zone_t	*qm_dqzone;	 /* dquot mem-alloc zone */
-	kmem_zone_t	*qm_dqtrxzone;	 /* t_dqinfo of transactions */
-} xfs_qm_t;
-
-/*
  * Various quota information for individual filesystems.
  * The mount structure keeps a pointer to this.
  */
Index: xfs/fs/xfs/xfs_qm_bhv.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_bhv.c	2012-02-12 13:18:57.546932636 -0800
+++ xfs/fs/xfs/xfs_qm_bhv.c	2012-02-12 13:22:51.390270305 -0800
@@ -86,21 +86,3 @@ xfs_qm_statvfs(
 		xfs_qm_dqput(dqp);
 	}
 }
-
-void __init
-xfs_qm_init(void)
-{
-	printk(KERN_INFO "SGI XFS Quota Management subsystem\n");
-	mutex_init(&xfs_Gqm_lock);
-	xfs_qm_init_procfs();
-}
-
-void __exit
-xfs_qm_exit(void)
-{
-	xfs_qm_cleanup_procfs();
-	if (qm_dqzone)
-		kmem_zone_destroy(qm_dqzone);
-	if (qm_dqtrxzone)
-		kmem_zone_destroy(qm_dqtrxzone);
-}
Index: xfs/fs/xfs/xfs_trans_dquot.c
===================================================================
--- xfs.orig/fs/xfs/xfs_trans_dquot.c	2012-02-07 10:00:57.097917604 -0800
+++ xfs/fs/xfs/xfs_trans_dquot.c	2012-02-12 13:22:51.390270305 -0800
@@ -876,7 +876,7 @@ STATIC void
 xfs_trans_alloc_dqinfo(
 	xfs_trans_t	*tp)
 {
-	tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
+	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
 }
 
 void
@@ -885,6 +885,6 @@ xfs_trans_free_dqinfo(
 {
 	if (!tp->t_dqinfo)
 		return;
-	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
+	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
 	tp->t_dqinfo = NULL;
 }
Index: xfs/fs/xfs/xfs_super.c
===================================================================
--- xfs.orig/fs/xfs/xfs_super.c	2012-02-12 13:18:57.220265963 -0800
+++ xfs/fs/xfs/xfs_super.c	2012-02-12 13:22:51.393603639 -0800
@@ -1658,13 +1658,17 @@ init_xfs_fs(void)
 	if (error)
 		goto out_cleanup_procfs;
 
-	vfs_initquota();
+	error = xfs_qm_init();
+	if (error)
+		goto out_sysctl_unregister;
 
 	error = register_filesystem(&xfs_fs_type);
 	if (error)
-		goto out_sysctl_unregister;
+		goto out_qm_exit;
 	return 0;
 
+ out_qm_exit:
+	xfs_qm_exit();
  out_sysctl_unregister:
 	xfs_sysctl_unregister();
  out_cleanup_procfs:
@@ -1686,7 +1690,7 @@ init_xfs_fs(void)
 STATIC void __exit
 exit_xfs_fs(void)
 {
-	vfs_exitquota();
+	xfs_qm_exit();
 	unregister_filesystem(&xfs_fs_type);
 	xfs_sysctl_unregister();
 	xfs_cleanup_procfs();
Index: xfs/fs/xfs/xfs_super.h
===================================================================
--- xfs.orig/fs/xfs/xfs_super.h	2012-02-07 10:00:57.111250937 -0800
+++ xfs/fs/xfs/xfs_super.h	2012-02-12 13:22:51.393603639 -0800
@@ -21,13 +21,11 @@
 #include <linux/exportfs.h>
 
 #ifdef CONFIG_XFS_QUOTA
-extern void xfs_qm_init(void);
+extern int xfs_qm_init(void);
 extern void xfs_qm_exit(void);
-# define vfs_initquota()	xfs_qm_init()
-# define vfs_exitquota()	xfs_qm_exit()
 #else
-# define vfs_initquota()	do { } while (0)
-# define vfs_exitquota()	do { } while (0)
+# define xfs_qm_init()	(0)
+# define xfs_qm_exit()	do { } while (0)
 #endif
 
 #ifdef CONFIG_XFS_POSIX_ACL

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (5 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 6/9] xfs: remove the global xfs_Gqm structure Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-16  0:11   ` Dave Chinner
  2012-02-27  1:57   ` Ben Myers
  2012-02-15  2:29 ` [PATCH 8/9] xfs: include reservations in quota reporting Christoph Hellwig
  2012-02-15  2:29 ` [PATCH 9/9] quota: make Q_XQUOTASYNC a noop Christoph Hellwig
  8 siblings, 2 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-kill-xfs_qm_export_dquot --]
[-- Type: text/plain, Size: 5742 bytes --]

There is no good reason to have these two separate, and for the next change
I'd need the full struct xfs_dquot in xfs_qm_export_dquot, so better just
fold the code now instead of changing it around.

Signed-off-by: Christoph Hellwig <hch@lst.de>

Index: xfs/fs/xfs/xfs_qm_syscalls.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_syscalls.c	2012-02-02 13:11:38.368396372 +0100
+++ xfs/fs/xfs/xfs_qm_syscalls.c	2012-02-02 13:17:04.139964850 +0100
@@ -47,9 +47,6 @@ STATIC int	xfs_qm_log_quotaoff_end(xfs_m
 					uint);
 STATIC uint	xfs_qm_export_flags(uint);
 STATIC uint	xfs_qm_export_qtype_flags(uint);
-STATIC void	xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
-					fs_disk_quota_t *);
-
 
 /*
  * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -635,42 +632,6 @@ xfs_qm_scall_setqlim(
 	return error;
 }
 
-int
-xfs_qm_scall_getquota(
-	xfs_mount_t	*mp,
-	xfs_dqid_t	id,
-	uint		type,
-	fs_disk_quota_t *out)
-{
-	xfs_dquot_t	*dqp;
-	int		error;
-
-	/*
-	 * Try to get the dquot. We don't want it allocated on disk, so
-	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
-	 * exist, we'll get ENOENT back.
-	 */
-	if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
-		return (error);
-	}
-
-	/*
-	 * If everything's NULL, this dquot doesn't quite exist as far as
-	 * our utility programs are concerned.
-	 */
-	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
-		xfs_qm_dqput(dqp);
-		return XFS_ERROR(ENOENT);
-	}
-	/*
-	 * Convert the disk dquot to the exportable format
-	 */
-	xfs_qm_export_dquot(mp, &dqp->q_core, out);
-	xfs_qm_dqput(dqp);
-	return (error ? XFS_ERROR(EFAULT) : 0);
-}
-
-
 STATIC int
 xfs_qm_log_quotaoff_end(
 	xfs_mount_t		*mp,
@@ -759,50 +720,66 @@ error0:
 }
 
 
-/*
- * Translate an internal style on-disk-dquot to the exportable format.
- * The main differences are that the counters/limits are all in Basic
- * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
- * to be converted to the native endianness.
- */
-STATIC void
-xfs_qm_export_dquot(
-	xfs_mount_t		*mp,
-	xfs_disk_dquot_t	*src,
+int
+xfs_qm_scall_getquota(
+	struct xfs_mount	*mp,
+	xfs_dqid_t		id,
+	uint			type,
 	struct fs_disk_quota	*dst)
 {
+	struct xfs_dquot	*dqp;
+	int			error;
+
+	/*
+	 * Try to get the dquot. We don't want it allocated on disk, so
+	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
+	 * exist, we'll get ENOENT back.
+	 */
+	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
+	if (error)
+		return error;
+
+	/*
+	 * If everything's NULL, this dquot doesn't quite exist as far as
+	 * our utility programs are concerned.
+	 */
+	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+		error = XFS_ERROR(ENOENT);
+		goto out_put;
+	}
+
 	memset(dst, 0, sizeof(*dst));
-	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
-	dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
-	dst->d_id = be32_to_cpu(src->d_id);
+	dst->d_version = FS_DQUOT_VERSION;
+	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
+	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
 	dst->d_blk_hardlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
+		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
 	dst->d_blk_softlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
-	dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
-	dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
-	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
-	dst->d_icount = be64_to_cpu(src->d_icount);
-	dst->d_btimer = be32_to_cpu(src->d_btimer);
-	dst->d_itimer = be32_to_cpu(src->d_itimer);
-	dst->d_iwarns = be16_to_cpu(src->d_iwarns);
-	dst->d_bwarns = be16_to_cpu(src->d_bwarns);
+		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
+	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
+	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_bcount));
+	dst->d_icount = be64_to_cpu(dqp->q_core.d_icount);
+	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
+	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
+	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
+	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
 	dst->d_rtb_hardlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
+		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
 	dst->d_rtb_softlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
-	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
-	dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
-	dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);
+		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtbcount));
+	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 
 	/*
 	 * Internally, we don't reset all the timers when quota enforcement
 	 * gets turned off. No need to confuse the user level code,
 	 * so return zeroes in that case.
 	 */
-	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
+	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
 	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
-			(src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
+			(dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
 		dst->d_btimer = 0;
 		dst->d_itimer = 0;
 		dst->d_rtbtimer = 0;
@@ -823,6 +800,9 @@ xfs_qm_export_dquot(
 		}
 	}
 #endif
+out_put:
+	xfs_qm_dqput(dqp);
+	return error;
 }
 
 STATIC uint

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 8/9] xfs: include reservations in quota reporting
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (6 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-16  0:14   ` Dave Chinner
  2012-02-15  2:29 ` [PATCH 9/9] quota: make Q_XQUOTASYNC a noop Christoph Hellwig
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: xfs-quota-report-delalloc-reservations --]
[-- Type: text/plain, Size: 3453 bytes --]

Report all quota usage including the currently pending reservations.  This
avoids the need to flush delalloc space before gathering quota information,
and matches quota enforcement, which already takes the reservations into
account.

Signed-off-by: Christoph Hellwig <hch@lst.de>

Index: xfs/fs/xfs/xfs_qm_bhv.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_bhv.c	2012-02-02 15:06:42.624326140 +0100
+++ xfs/fs/xfs/xfs_qm_bhv.c	2012-02-02 15:06:43.077657017 +0100
@@ -40,28 +40,28 @@
 STATIC void
 xfs_fill_statvfs_from_dquot(
 	struct kstatfs		*statp,
-	xfs_disk_dquot_t	*dp)
+	struct xfs_dquot	*dqp)
 {
 	__uint64_t		limit;
 
-	limit = dp->d_blk_softlimit ?
-		be64_to_cpu(dp->d_blk_softlimit) :
-		be64_to_cpu(dp->d_blk_hardlimit);
+	limit = dqp->q_core.d_blk_softlimit ?
+		be64_to_cpu(dqp->q_core.d_blk_softlimit) :
+		be64_to_cpu(dqp->q_core.d_blk_hardlimit);
 	if (limit && statp->f_blocks > limit) {
 		statp->f_blocks = limit;
 		statp->f_bfree = statp->f_bavail =
-			(statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
-			 (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
+			(statp->f_blocks > dqp->q_res_bcount) ?
+			 (statp->f_blocks - dqp->q_res_bcount) : 0;
 	}
 
-	limit = dp->d_ino_softlimit ?
-		be64_to_cpu(dp->d_ino_softlimit) :
-		be64_to_cpu(dp->d_ino_hardlimit);
+	limit = dqp->q_core.d_ino_softlimit ?
+		be64_to_cpu(dqp->q_core.d_ino_softlimit) :
+		be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 	if (limit && statp->f_files > limit) {
 		statp->f_files = limit;
 		statp->f_ffree =
-			(statp->f_files > be64_to_cpu(dp->d_icount)) ?
-			 (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
+			(statp->f_files > dqp->q_res_icount) ?
+			 (statp->f_ffree - dqp->q_res_icount) : 0;
 	}
 }
 
@@ -82,7 +82,7 @@ xfs_qm_statvfs(
 	xfs_dquot_t		*dqp;
 
 	if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
-		xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
+		xfs_fill_statvfs_from_dquot(statp, dqp);
 		xfs_qm_dqput(dqp);
 	}
 }
Index: xfs/fs/xfs/xfs_qm_syscalls.c
===================================================================
--- xfs.orig/fs/xfs/xfs_qm_syscalls.c	2012-02-02 15:06:42.837658317 +0100
+++ xfs/fs/xfs/xfs_qm_syscalls.c	2012-02-02 15:07:31.894059224 +0100
@@ -758,8 +758,8 @@ xfs_qm_scall_getquota(
 		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
 	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_bcount));
-	dst->d_icount = be64_to_cpu(dqp->q_core.d_icount);
+	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
+	dst->d_icount = dqp->q_res_icount;
 	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
 	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
 	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
@@ -768,7 +768,7 @@ xfs_qm_scall_getquota(
 		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
 	dst->d_rtb_softlimit =
 		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
-	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtbcount));
+	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
 	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
 	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 9/9] quota: make Q_XQUOTASYNC a noop
  2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
                   ` (7 preceding siblings ...)
  2012-02-15  2:29 ` [PATCH 8/9] xfs: include reservations in quota reporting Christoph Hellwig
@ 2012-02-15  2:29 ` Christoph Hellwig
  2012-02-16  0:15   ` Dave Chinner
  8 siblings, 1 reply; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-15  2:29 UTC (permalink / raw)
  To: xfs

[-- Attachment #1: quota-disable-Q_XQUOTASYNC --]
[-- Type: text/plain, Size: 1023 bytes --]

Now that XFS takes quota reservations into account there is no need to flush
anything before reporting quotas - in addition to being fully transactional
all quota information is also 100% coherent with the rest of the filesystem
now.

Signed-off-by: Christoph Hellwig <hch@lst.de>

Index: xfs/fs/quota/quota.c
===================================================================
--- xfs.orig/fs/quota/quota.c	2012-02-02 13:06:30.693396524 +0100
+++ xfs/fs/quota/quota.c	2012-02-02 13:23:23.181244741 +0100
@@ -282,10 +282,9 @@ static int do_quotactl(struct super_bloc
 	case Q_XGETQUOTA:
 		return quota_getxquota(sb, type, id, addr);
 	case Q_XQUOTASYNC:
-		/* caller already holds s_umount */
 		if (sb->s_flags & MS_RDONLY)
 			return -EROFS;
-		writeback_inodes_sb(sb, WB_REASON_SYNC);
+		/* XFS quotas are fully coherent now, making this call a noop */
 		return 0;
 	default:
 		return -EINVAL;

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/9] xfs: use per-filesystem dquot LRU lists
  2012-02-15  2:29 ` [PATCH 1/9] xfs: use per-filesystem dquot LRU lists Christoph Hellwig
@ 2012-02-15 22:10   ` Dave Chinner
  2012-02-17 17:33     ` Christoph Hellwig
  0 siblings, 1 reply; 25+ messages in thread
From: Dave Chinner @ 2012-02-15 22:10 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:27PM -0500, Christoph Hellwig wrote:
> Replace the global dquot lru lists with a per-filesystem one.
> 
> Note that the shrinker isn't wired up to the per-superblock VFS shrinker
> infrastructure, as doing so would cause problems due to summing up and
> splitting out again the counts for inodes and dquots.  I do not believe this
> is a major issue as the quota cache is not deeply intertwined with inode
> and dentry caching.

Yes, that's fine, like the xfs_buf cache has it's own shrinker
and reclaim algorithm....

> Also temporarily stop tracking the system-wide count of dquots on the LRU
> lists for /proc/fs/xfs/xqm, which will be added back later in the series.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
....
> @@ -869,12 +868,13 @@ recurse:
>  
>  	trace_xfs_dqput_free(dqp);
>  
> -	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
> -	if (list_empty(&dqp->q_freelist)) {
> -		list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
> -		xfs_Gqm->qm_dqfrlist_cnt++;
> +	mutex_lock(&dqp->q_mount->m_quotainfo->qi_lru_lock);
> +	if (list_empty(&dqp->q_lru)) {
> +		list_add_tail(&dqp->q_lru,
> +			      &dqp->q_mount->m_quotainfo->qi_lru_list);
> +		dqp->q_mount->m_quotainfo->qi_lru_count++;
>  	}
> -	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
> +	mutex_unlock(&dqp->q_mount->m_quotainfo->qi_lru_lock);

Might be nice to add a

+	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;

here  to make the code a little easier to read and consistent with
all the other functions.

Regardless, change looks good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup
  2012-02-15  2:29 ` [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup Christoph Hellwig
@ 2012-02-15 22:21   ` Dave Chinner
  2012-02-17 17:38     ` Christoph Hellwig
  0 siblings, 1 reply; 25+ messages in thread
From: Dave Chinner @ 2012-02-15 22:21 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:28PM -0500, Christoph Hellwig wrote:
> Replace the global hash tables for looking up in-memory dquot structures
> with per-filesystem radix trees to allow scaling to a large number of
> in-memory dquot structures.

And simplifies the code a lot. Only minor comments about using
local quotainfo variables below.

> @@ -1593,10 +1515,10 @@ xfs_qm_dqfree_one(
>  	struct xfs_mount	*mp = dqp->q_mount;
>  	struct xfs_quotainfo	*qi = mp->m_quotainfo;
>  
> -	mutex_lock(&dqp->q_hash->qh_lock);
> -	list_del_init(&dqp->q_hashlist);
> -	dqp->q_hash->qh_version++;
> -	mutex_unlock(&dqp->q_hash->qh_lock);
> +	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
> +	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
> +			  be32_to_cpu(dqp->q_core.d_id));
> +	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);

qi->qi_tree_lock, as the local variable is already set up.

> @@ -672,10 +618,9 @@ xfs_qm_dqget(
>  	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
>  	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
>  {
> -	xfs_dquot_t	*dqp, *dqp1;
> -	xfs_dqhash_t	*h;
> -	uint		version;
> -	int		error;
> +	struct radix_tree_root *tree = XFS_DQUOT_TREE(mp, type);
> +	struct xfs_dquot	*dqp;
> +	int			error;

Add a quotainfo local variable and use it in the function?

> @@ -1171,10 +1078,10 @@ xfs_qm_dqpurge(
>  	xfs_dqfunlock(dqp);
>  	xfs_dqunlock(dqp);
>  
> -	mutex_lock(&qh->qh_lock);
> -	list_del_init(&dqp->q_hashlist);
> -	qh->qh_version++;
> -	mutex_unlock(&qh->qh_lock);
> +	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
> +	radix_tree_delete(XFS_DQUOT_TREE(mp, dqp->q_core.d_flags),
> +			  be32_to_cpu(dqp->q_core.d_id));
> +	mutex_unlock(&mp->m_quotainfo->qi_tree_lock);

qi->qi_tree_lock as it is already there and used for all the other
quotainfo references.

Otherwise looks good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>
-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 3/9] xfs: remove the per-filesystem list of dquots
  2012-02-15  2:29 ` [PATCH 3/9] xfs: remove the per-filesystem list of dquots Christoph Hellwig
@ 2012-02-15 22:59   ` Dave Chinner
  2012-02-17 17:47     ` Christoph Hellwig
  0 siblings, 1 reply; 25+ messages in thread
From: Dave Chinner @ 2012-02-15 22:59 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:29PM -0500, Christoph Hellwig wrote:
> Instead of keeping a separate per-filesystem list of dquots we can walk
> the radix tree for the two places where we need to iterate all quota
> structures.

And with the new radix tree iterator code being worked on, this will
become even simpler soon...

.....
> @@ -1025,16 +1017,23 @@ xfs_dqlock2(
>  
>  /*
>   * Take a dquot out of the mount's dqlist as well as the hashlist.  This is

remove from the quota tree. No hashlist anymore, either...

> - * called via unmount as well as quotaoff, and the purge will always succeed.
> + * called via unmount as well as quotaoff.
>   */
> -void
> +int
>  xfs_qm_dqpurge(
> -	struct xfs_dquot	*dqp)
> +	struct xfs_dquot	*dqp,
> +	int			flags)
>  {
>  	struct xfs_mount	*mp = dqp->q_mount;
>  	struct xfs_quotainfo	*qi = mp->m_quotainfo;
>  
>  	xfs_dqlock(dqp);
> +	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
> +		xfs_dqlock(dqp);

xfs_dqunlock()?

> +++ xfs/fs/xfs/xfs_qm.c	2012-02-12 13:22:33.326936637 -0800
> @@ -308,172 +308,157 @@ xfs_qm_unmount_quotas(
>  }
>  
>  /*
> - * Flush all dquots of the given file system to disk. The dquots are
> - * _not_ purged from memory here, just their data written to disk.
> + * The quota lookup is done in batches to keep the amount of lock traffic and
> + * radix tree lookups to a minimum. The batch size is a trade off between
> + * lookup reduction and stack usage.

Given the way the locking works here, the gang lookup doesn't really
do anything for reducing lock traffic. It reduces lookup overhead a
bit, but seeing as we don't drop the tree lock while executing
operations on each dquot I don't see much advantage in the
complexity of batched lookups....

>   */
> +#define XFS_DQ_LOOKUP_BATCH	32
> +
>  STATIC int
> -xfs_qm_dqflush_all(
> -	struct xfs_mount	*mp)
> -{
> -	struct xfs_quotainfo	*q = mp->m_quotainfo;
> -	int			recl;
> -	struct xfs_dquot	*dqp;
> -	int			error;
> +xfs_qm_dquot_walk(
> +	struct xfs_mount	*mp,
> +	int			type,
> +	int			(*execute)(struct xfs_dquot *dqp, int flags),
> +	int			flags)
> +{
> +	struct radix_tree_root	*tree = XFS_DQUOT_TREE(mp, type);
> +	uint32_t		first_index;
> +	int			last_error = 0;
> +	int			skipped;
> +	int			nr_found;
> +
> +restart:
> +	skipped = 0;
> +	first_index = 0;
> +	nr_found = 0;
>  
> -	if (!q)
> -		return 0;
> -again:
> -	mutex_lock(&q->qi_dqlist_lock);
> -	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
> -		xfs_dqlock(dqp);
> -		if ((dqp->dq_flags & XFS_DQ_FREEING) ||
> -		    !XFS_DQ_IS_DIRTY(dqp)) {
> -			xfs_dqunlock(dqp);
> -			continue;
> -		}
> +	mutex_lock(&mp->m_quotainfo->qi_tree_lock);
> +	do {
> +		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
> +		int		error = 0;
> +		int		i;
> +
> +		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
> +					first_index, XFS_DQ_LOOKUP_BATCH);
> +		if (!nr_found)
> +			break;
>  
> -		/* XXX a sentinel would be better */
> -		recl = q->qi_dqreclaims;
> -		if (!xfs_dqflock_nowait(dqp)) {
> -			/*
> -			 * If we can't grab the flush lock then check
> -			 * to see if the dquot has been flushed delayed
> -			 * write.  If so, grab its buffer and send it
> -			 * out immediately.  We'll be able to acquire
> -			 * the flush lock when the I/O completes.
> -			 */
> -			xfs_dqflock_pushbuf_wait(dqp);
> +		for (i = 0; i < nr_found; i++) {
> +			struct xfs_dquot *dqp = batch[i];
> +
> +			first_index = be32_to_cpu(dqp->q_core.d_id) + 1;
> +
> +			error = execute(batch[i], flags);
> +			if (error == EAGAIN) {
> +				skipped++;
> +				continue;
> +			}
> +			if (error && last_error != EFSCORRUPTED)
> +				last_error = error;
> +		}
> +		/* bail out if the filesystem is corrupted.  */
> +		if (error == EFSCORRUPTED) {
> +			skipped = 0;
> +			break;
>  		}

The problem I see with this is that it holds the qi_tree_lock over
the entire walk - it is not dropped anywhere if there is no
reschedule pressure. Hence all lookups will stall while a walk is in
progress. Given a walk can block on IO or dquot locks, this could
mean that a walk holds off lookups for quite some time.

> -		/*
> -		 * Let go of the mplist lock. We don't want to hold it
> -		 * across a disk write.
> -		 */
> -		mutex_unlock(&q->qi_dqlist_lock);
> -		error = xfs_qm_dqflush(dqp, 0);
> -		xfs_dqunlock(dqp);
> -		if (error)
> -			return error;
>  
> -		mutex_lock(&q->qi_dqlist_lock);
> -		if (recl != q->qi_dqreclaims) {
> -			mutex_unlock(&q->qi_dqlist_lock);
> -			/* XXX restart limit */
> -			goto again;
> +		if (need_resched()) {
> +			mutex_unlock(&mp->m_quotainfo->qi_tree_lock);
> +			cond_resched();
> +			mutex_lock(&mp->m_quotainfo->qi_tree_lock);
>  		}

While this plays nice with other threads that require low latency,
it doesn't solve the "hold the lock for the entire walk" problem
when lookups are trying to get the qi tree lock as need_resched
state is only triggered at the scheduler level and not by lock
waiters.

The problem doesn't exist with the current code, because lookups are
done under the hash lock, not the list lock. Now both lookup and
all-dquot-walking functionality are under the same lock, so
hold-offs definitely need thinking about.  Do we need to hold the
tree lock over the execute() function - I can see the advantages for
the purge case, but for the flush case it is less clear. Perhaps
unconditionally dropping the tree lock after every batch would
mitigate this problem - after all we already have the index we need
to do the next lookup from and that doesn't change if we drop the
lock....

> +STATIC int
> +xfs_qm_flush_one(
> +	struct xfs_dquot	*dqp,
> +	int			flags)
>  {
> -	struct xfs_quotainfo	*q = mp->m_quotainfo;
> -	struct xfs_dquot	*dqp, *gdqp;
> +	int			error = 0;
>  
> - again:
> -	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
> -	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
> -		xfs_dqlock(dqp);
> -		if (dqp->dq_flags & XFS_DQ_FREEING) {
> -			xfs_dqunlock(dqp);
> -			mutex_unlock(&q->qi_dqlist_lock);
> -			delay(1);
> -			mutex_lock(&q->qi_dqlist_lock);
> -			goto again;
> -		}
> +	xfs_dqlock(dqp);
> +	if (dqp->dq_flags & XFS_DQ_FREEING)
> +		goto out_unlock;
> +	if (!XFS_DQ_IS_DIRTY(dqp))
> +		goto out_unlock;
>  
> -		gdqp = dqp->q_gdquot;
> -		if (gdqp)
> -			dqp->q_gdquot = NULL;
> -		xfs_dqunlock(dqp);
> +	if (!xfs_dqflock_nowait(dqp))
> +		xfs_dqflock_pushbuf_wait(dqp);

For example, this blocks holding the tree lock waiting for IO
completion.

> -xfs_qm_dqpurge_int(
> +xfs_qm_detach_gdquot(
> +	struct xfs_dquot	*dqp,
> +	int			flags)
> +{
> +	struct xfs_dquot	*gdqp;
> +
> +	xfs_dqlock(dqp);
> +	/* XXX(hch): should we bother with freeeing dquots here? */
> +	if (dqp->dq_flags & XFS_DQ_FREEING) {
> +		xfs_dqunlock(dqp);
> +		return 0;
> +	}

Better to be safe, I think, rather than leave a landmine for future
modifications to trip over...

.....

> +	if (!error && (flags & XFS_QMOPT_UQUOTA))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, 0);
> +	if (!error && (flags & XFS_QMOPT_GQUOTA))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, 0);
> +	if (!error && (flags & XFS_QMOPT_PQUOTA))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, 0);
> +	return error;

Seeing as it is a purge, even on an error I'd still try to purge all
trees. Indeed, what happens in the case of a filesystem shutdown
here?

> +	 * We've made all the changes that we need to make incore.  Flush them
> +	 * down to disk buffers if everything was updated successfully.
>  	 */
> -	if (!error)
> -		error = xfs_qm_dqflush_all(mp);
> +	if (!error && XFS_IS_UQUOTA_ON(mp))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, 0);
> +	if (!error && XFS_IS_GQUOTA_ON(mp))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one, 0);
> +	if (!error && XFS_IS_PQUOTA_ON(mp))
> +		error = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one, 0);

Same here - I'd still try to flush each tree even if one tree gets
an error...

Hmmmm- all the walk cases pass 0 as their flags. Are they used in
later patches?

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 4/9] xfs: use per-CPU data for the quota statistics
  2012-02-15  2:29 ` [PATCH 4/9] xfs: use per-CPU data for the quota statistics Christoph Hellwig
@ 2012-02-15 23:59   ` Dave Chinner
  2012-02-17 17:48     ` Christoph Hellwig
  0 siblings, 1 reply; 25+ messages in thread
From: Dave Chinner @ 2012-02-15 23:59 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:30PM -0500, Christoph Hellwig wrote:
> Use the same per-CPU scheme used in the main XFS statistics, as well as
> the VFS inode and dcache statistics for the quota code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

.....

> -struct xqmstats xqmstats;
> +DEFINE_PER_CPU(struct xqmstats, xqmstats);
> +
> +static int xqmstats_sum(int idx)
> +{
> +	int val = 0, cpu;
> +
> +	for_each_possible_cpu(cpu)
> +		val += *(((__u32 *)&per_cpu(xqmstats, cpu) + idx));
> +	return max(val, 0);
> +}

Why not just make val a 64bit value so overflow is simply not an
issue?

Otherwise, all looks good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 5/9] xfs: user per-cpu stats for the total dquot numbers
  2012-02-15  2:29 ` [PATCH 5/9] xfs: user per-cpu stats for the total dquot numbers Christoph Hellwig
@ 2012-02-16  0:02   ` Dave Chinner
  2012-02-17 17:48     ` Christoph Hellwig
  0 siblings, 1 reply; 25+ messages in thread
From: Dave Chinner @ 2012-02-16  0:02 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:31PM -0500, Christoph Hellwig wrote:
> Switch the total number of dquots counter over to use the per-cpu stats
> implementation, and reintroduce the number of unused dquots counter
> dropped earlier in the series.
> 
> Btw, I wonder if we should simply add these counters to /proc/fs/xfs/xqmstat
> instead of keeping the odd format and mostly superfluous /proc/fs/xfs/xqm
> around.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> 
> ---
>  fs/xfs/xfs_dquot.c    |    7 ++++---
>  fs/xfs/xfs_qm.c       |    3 ++-
>  fs/xfs/xfs_qm.h       |    1 -
>  fs/xfs/xfs_qm_stats.c |    4 ++--
>  fs/xfs/xfs_qm_stats.h |    5 +++++
>  5 files changed, 13 insertions(+), 7 deletions(-)
> 
> Index: xfs/fs/xfs/xfs_qm_stats.c
> ===================================================================
> --- xfs.orig/fs/xfs/xfs_qm_stats.c	2012-02-12 13:22:45.040270187 -0800
> +++ xfs/fs/xfs/xfs_qm_stats.c	2012-02-12 13:22:49.553603603 -0800
> @@ -52,9 +52,9 @@ static int xqm_proc_show(struct seq_file
>  	/* maximum; incore; ratio free to inuse; freelist */
>  	seq_printf(m, "%d\t%d\t%d\t%u\n",
>  			0,
> -			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
> +			xqmstats_sum(XQMSTAT_END_XQMSTAT),
>  			0,
> -			0);
> +			xqmstats_sum(XQMSTAT_END_XQMSTAT + 1));
>  	return 0;
>  }
>  
> Index: xfs/fs/xfs/xfs_qm_stats.h
> ===================================================================
> --- xfs.orig/fs/xfs/xfs_qm_stats.h	2012-02-12 13:22:45.040270187 -0800
> +++ xfs/fs/xfs/xfs_qm_stats.h	2012-02-12 13:22:49.553603603 -0800
> @@ -33,6 +33,9 @@ struct xqmstats {
>  	__uint32_t		xs_qm_dqshake_reclaims;
>  	__uint32_t		xs_qm_dqinact_reclaims;
>  #define XQMSTAT_END_XQMSTAT	8
> +	__uint32_t		xs_qm_dquots;
> +	__uint32_t		xs_qm_dquots_unused;
> +#define XQMSTAT_END_XQM		(XQMSTAT_END_XQMSTAT + 4)

Shouldn't that be (XQMSTAT_END_XQMSTAT + 2)?

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 6/9] xfs: remove the global xfs_Gqm structure
  2012-02-15  2:29 ` [PATCH 6/9] xfs: remove the global xfs_Gqm structure Christoph Hellwig
@ 2012-02-16  0:07   ` Dave Chinner
  0 siblings, 0 replies; 25+ messages in thread
From: Dave Chinner @ 2012-02-16  0:07 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:32PM -0500, Christoph Hellwig wrote:
> Initialize the slab caches for the quota code as soon as XFS is loaded, and
> remove the now unused overhead of a reference counted global structure.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good to me. A nice cleanup.

Reviewed-by: Dave Chinner <dchinner@redhat.com>

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota
  2012-02-15  2:29 ` [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota Christoph Hellwig
@ 2012-02-16  0:11   ` Dave Chinner
  2012-02-27  1:57   ` Ben Myers
  1 sibling, 0 replies; 25+ messages in thread
From: Dave Chinner @ 2012-02-16  0:11 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:33PM -0500, Christoph Hellwig wrote:
> There is no good reason to have these two separate, and for the next change
> I'd need the full struct xfs_dquot in xfs_qm_export_dquot, so better just
> fold the code now instead of changing it around.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 8/9] xfs: include reservations in quota reporting
  2012-02-15  2:29 ` [PATCH 8/9] xfs: include reservations in quota reporting Christoph Hellwig
@ 2012-02-16  0:14   ` Dave Chinner
  0 siblings, 0 replies; 25+ messages in thread
From: Dave Chinner @ 2012-02-16  0:14 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:34PM -0500, Christoph Hellwig wrote:
> Report all quota usage including the currently pending reservations.  This
> avoids the need to flush delalloc space before gathering quota information,
> and matches quota enforcement, which already takes the reservations into
> account.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>
-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 9/9] quota: make Q_XQUOTASYNC a noop
  2012-02-15  2:29 ` [PATCH 9/9] quota: make Q_XQUOTASYNC a noop Christoph Hellwig
@ 2012-02-16  0:15   ` Dave Chinner
  0 siblings, 0 replies; 25+ messages in thread
From: Dave Chinner @ 2012-02-16  0:15 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 14, 2012 at 09:29:35PM -0500, Christoph Hellwig wrote:
> Now that XFS takes quota reservations into account there is no need to flush
> anything before reporting quotas - in addition to being fully transactional
> all quota information is also 100% coherent with the rest of the filesystem
> now.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Dave Chinner <dchinner@redhat.com>

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/9] xfs: use per-filesystem dquot LRU lists
  2012-02-15 22:10   ` Dave Chinner
@ 2012-02-17 17:33     ` Christoph Hellwig
  0 siblings, 0 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-17 17:33 UTC (permalink / raw)
  To: Dave Chinner; +Cc: xfs

On Thu, Feb 16, 2012 at 09:10:45AM +1100, Dave Chinner wrote:
> Might be nice to add a
> 
> +	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
> 
> here  to make the code a little easier to read and consistent with
> all the other functions.
> 
> Regardless, change looks good.

Makes sense.  I've done that and actually split the real final put
into a separate xfs_qm_dqput_final function, and replaced the tail
recursion with a real recursive call to xfs_qm_dqput for the group
quota.  Given that this is a fairly large amount of changes I didn't
bother to add your reviewed by tag until it's getting re-reviewed.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup
  2012-02-15 22:21   ` Dave Chinner
@ 2012-02-17 17:38     ` Christoph Hellwig
  0 siblings, 0 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-17 17:38 UTC (permalink / raw)
  To: Dave Chinner; +Cc: Christoph Hellwig, xfs

On Thu, Feb 16, 2012 at 09:21:11AM +1100, Dave Chinner wrote:
> qi->qi_tree_lock, as the local variable is already set up.

Indeed.

> > +	struct radix_tree_root *tree = XFS_DQUOT_TREE(mp, type);
> > +	struct xfs_dquot	*dqp;
> > +	int			error;
> 
> Add a quotainfo local variable and use it in the function?

Yes, that's a good idea.

> qi->qi_tree_lock as it is already there and used for all the other
> quotainfo references.

Indeed.

I've fixed all these up, and also changed the XFS_DQUOT_TREE macro
to take a quotainfo pointer instead of a mount pointer.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 3/9] xfs: remove the per-filesystem list of dquots
  2012-02-15 22:59   ` Dave Chinner
@ 2012-02-17 17:47     ` Christoph Hellwig
  0 siblings, 0 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-17 17:47 UTC (permalink / raw)
  To: Dave Chinner; +Cc: xfs

On Thu, Feb 16, 2012 at 09:59:22AM +1100, Dave Chinner wrote:
> On Tue, Feb 14, 2012 at 09:29:29PM -0500, Christoph Hellwig wrote:
> > Instead of keeping a separate per-filesystem list of dquots we can walk
> > the radix tree for the two places where we need to iterate all quota
> > structures.
> 
> And with the new radix tree iterator code being worked on, this will
> become even simpler soon...

Indeed.

> >  	struct xfs_mount	*mp = dqp->q_mount;
> >  	struct xfs_quotainfo	*qi = mp->m_quotainfo;
> >  
> >  	xfs_dqlock(dqp);
> > +	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
> > +		xfs_dqlock(dqp);
> 
> xfs_dqunlock()?

Yes.

> > - * Flush all dquots of the given file system to disk. The dquots are
> > - * _not_ purged from memory here, just their data written to disk.
> > + * The quota lookup is done in batches to keep the amount of lock traffic and
> > + * radix tree lookups to a minimum. The batch size is a trade off between
> > + * lookup reduction and stack usage.
> 
> Given the way the locking works here, the gang lookup doesn't really
> do anythign for reducing lock traffic. It reduces lookup overhead a
> bit, but seeing as we don't drop the tree lock while executing
> operations on each dquot I don't see much advantage in the
> complexity of batched lookups....

True.  On the other hand the code is there and debugged now, so I don't
see much point to change it - except for maybe using the new radix tree
iterator once it goes in.

> The problem I see with this is that it holds the qi_tree_lock over
> the entire walk - it is not dropped anywhere it there is no
> reschedule pressure. Hence all lookups will stall while a walk is in
> progress. Given a walk can block on IO or dquot locks, this could
> mean that a walk holds off lookups for quite some time.

Ok, maybe I should move it to individual lookups.  Then again this
code is only called either after quotacheck, when the filesystem isn't online
yet, or during umount/quotaoff, so all this doesn't matter too much.

> Seeing as it is a purge, even on an error I'd still try to purge all
> trees. Indeed, what happens in the case of a filesystem shutdown
> here?

I'll need to take a deeper look and figure this out.  Thanks for the
headsup.

> Hmmmm- all the walk cases pass 0 as their flags. Are they used in
> later patches?

No - it's a copy and paste leftover from the inode iterator.

In fact I'm tempted to simply log all dquots after a quotacheck now
that we have delaylog and support relogging.  After this we could drop
the generic iterator and just hardcode a function that while loop over
finding any dquot and purging it.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 4/9] xfs: use per-CPU data for the quota statistics
  2012-02-15 23:59   ` Dave Chinner
@ 2012-02-17 17:48     ` Christoph Hellwig
  0 siblings, 0 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-17 17:48 UTC (permalink / raw)
  To: Dave Chinner; +Cc: xfs

On Thu, Feb 16, 2012 at 10:59:29AM +1100, Dave Chinner wrote:
> Why not just make val a 64bit value so overflow is simply not an
> issue?

It's a 1:1 copy and paste from the normal XFS stats.

But thinking about this copy and paste is bad - I'll rework this to
add the quota stats that we keep into the normal XFS stats, and just
provide the two old quota stats files as legacy views into them.  That
way we'll have only a single copy of that code.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 5/9] xfs: user per-cpu stats for the total dquot numbers
  2012-02-16  0:02   ` Dave Chinner
@ 2012-02-17 17:48     ` Christoph Hellwig
  0 siblings, 0 replies; 25+ messages in thread
From: Christoph Hellwig @ 2012-02-17 17:48 UTC (permalink / raw)
  To: Dave Chinner; +Cc: xfs

On Thu, Feb 16, 2012 at 11:02:43AM +1100, Dave Chinner wrote:
> > +	__uint32_t		xs_qm_dquots;
> > +	__uint32_t		xs_qm_dquots_unused;
> > +#define XQMSTAT_END_XQM		(XQMSTAT_END_XQMSTAT + 4)
> 
> Shouldn't that be (XQMSTAT_END_XQMSTAT + 2)?

Yes.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota
  2012-02-15  2:29 ` [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota Christoph Hellwig
  2012-02-16  0:11   ` Dave Chinner
@ 2012-02-27  1:57   ` Ben Myers
  1 sibling, 0 replies; 25+ messages in thread
From: Ben Myers @ 2012-02-27  1:57 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

Hi Christoph,

On Tue, Feb 14, 2012 at 09:29:33PM -0500, Christoph Hellwig wrote:
> There is no good reason to have these two separate, and for the next change
> I'd need the full struct xfs_dquot in xfs_qm_export_dquot, so better just
> fold the code now instead of changing it around.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

...

> -int
> -xfs_qm_scall_getquota(

...

> -	/*
> -	 * Convert the disk dquot to the exportable format
> -	 */
> -	xfs_qm_export_dquot(mp, &dqp->q_core, out);
> -	xfs_qm_dqput(dqp);
> -	return (error ? XFS_ERROR(EFAULT) : 0);
> -}

Note that we used to map all errors to EFAULT.

...

> +int
> +xfs_qm_scall_getquota(

...

> +out_put:
> +	xfs_qm_dqput(dqp);
> +	return error;

And, now we don't.

The rest of the patch looks great to me.  If you intend to change the
error code, please mention so in the commit message.  Was it wrong
before, or will it be wrong if we pull this in?

Regards,
Ben

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2012-02-27  1:57 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-02-15  2:29 [PATCH 0/9] quota updates V2 Christoph Hellwig
2012-02-15  2:29 ` [PATCH 1/9] xfs: use per-filesystem dquot LRU lists Christoph Hellwig
2012-02-15 22:10   ` Dave Chinner
2012-02-17 17:33     ` Christoph Hellwig
2012-02-15  2:29 ` [PATCH 2/9] xfs: use per-filesystem radix trees for dquot lookup Christoph Hellwig
2012-02-15 22:21   ` Dave Chinner
2012-02-17 17:38     ` Christoph Hellwig
2012-02-15  2:29 ` [PATCH 3/9] xfs: remove the per-filesystem list of dquots Christoph Hellwig
2012-02-15 22:59   ` Dave Chinner
2012-02-17 17:47     ` Christoph Hellwig
2012-02-15  2:29 ` [PATCH 4/9] xfs: use per-CPU data for the quota statistics Christoph Hellwig
2012-02-15 23:59   ` Dave Chinner
2012-02-17 17:48     ` Christoph Hellwig
2012-02-15  2:29 ` [PATCH 5/9] xfs: user per-cpu stats for the total dquot numbers Christoph Hellwig
2012-02-16  0:02   ` Dave Chinner
2012-02-17 17:48     ` Christoph Hellwig
2012-02-15  2:29 ` [PATCH 6/9] xfs: remove the global xfs_Gqm structure Christoph Hellwig
2012-02-16  0:07   ` Dave Chinner
2012-02-15  2:29 ` [PATCH 7/9] xfs: merge xfs_qm_export_dquot into xfs_qm_scall_getquota Christoph Hellwig
2012-02-16  0:11   ` Dave Chinner
2012-02-27  1:57   ` Ben Myers
2012-02-15  2:29 ` [PATCH 8/9] xfs: include reservations in quota reporting Christoph Hellwig
2012-02-16  0:14   ` Dave Chinner
2012-02-15  2:29 ` [PATCH 9/9] quota: make Q_XQUOTASYNC a noop Christoph Hellwig
2012-02-16  0:15   ` Dave Chinner

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.