linux-kernel.vger.kernel.org archive mirror
From: npiggin@suse.de
To: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: John Stultz <johnstul@us.ibm.com>, Frank Mayhar <fmayhar@google.com>
Subject: [patch 51/52] fs: per-zone dentry and inode LRU
Date: Thu, 24 Jun 2010 13:03:03 +1000	[thread overview]
Message-ID: <20100624030733.869946613@suse.de> (raw)
In-Reply-To: <20100624030212.676457061@suse.de>

[-- Attachment #1: fs-dcache-per-zone-lru.patch --]
[-- Type: text/plain, Size: 20959 bytes --]

Per-zone LRUs and shrinkers for dentry and inode caches.

Signed-off-by: Nick Piggin <npiggin@suse.de>
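
For illustration only (this sketch is not part of the patch), the core idea is
that an object is assigned to the LRU of whichever zone its backing page lives
in. The following mirrors dentry_lru_add() below and assumes only the
sb_zone_info() helper and struct sb_zoneinfo introduced by this patch:

/* Sketch: add a dentry to its per-sb, per-zone LRU bucket. */
static void sketch_dentry_lru_add(struct dentry *dentry)
{
	/* zone owning the page that backs this dentry's memory */
	struct zone *zone = page_zone(virt_to_page(dentry));
	/* per-superblock, per-zone LRU state for that zone */
	struct sb_zoneinfo *sbz = sb_zone_info(dentry->d_sb, zone);

	spin_lock(&sbz->s_dentry_lru_lock);
	list_add(&dentry->d_lru, &sbz->s_dentry_lru);
	sbz->s_nr_dentry_unused++;
	spin_unlock(&sbz->s_dentry_lru_lock);
}

The same mapping is what lets prune_dcache() and prune_icache(), called from
the per-zone shrinker, confine their scanning to the zone that is actually
under memory pressure.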

Index: linux-2.6/include/linux/fs.h
===================================================================
--- linux-2.6.orig/include/linux/fs.h
+++ linux-2.6/include/linux/fs.h
@@ -382,6 +382,7 @@ struct files_stat_struct {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
+#include <linux/mmzone.h>
 
 #include <asm/atomic.h>
 #include <asm/byteorder.h>
@@ -1325,6 +1326,25 @@ extern int send_sigurg(struct fown_struc
 extern struct list_head super_blocks;
 extern spinlock_t sb_lock;
 
+#define sb_zone_info(sb, ___z)			\
+	(&(sb)->s_reclaim.node[zone_to_nid(___z)].zone[zone_idx(___z)])
+
+struct sb_zoneinfo {
+	/* list and counters below are protected by s_dentry_lru_lock */
+	spinlock_t		s_dentry_lru_lock;
+	struct list_head	s_dentry_lru;
+	long			s_nr_dentry_scan;
+	unsigned long		s_nr_dentry_unused;
+};
+
+struct sb_nodeinfo {
+	struct sb_zoneinfo zone[MAX_NR_ZONES];
+};
+
+struct sb_reclaim {
+	struct sb_nodeinfo node[MAX_NUMNODES];
+};
+
 struct super_block {
 	struct list_head	s_list;		/* Keep this first */
 	dev_t			s_dev;		/* search index; _not_ kdev_t */
@@ -1357,9 +1377,7 @@ struct super_block {
 	struct list_head	s_inodes;	/* all inodes */
 	struct list_head	s_files;
 #endif
-	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
-	struct list_head	s_dentry_lru;	/* unused dentry lru */
-	int			s_nr_dentry_unused;	/* # of dentry on lru */
+	struct sb_reclaim	s_reclaim;
 
 	struct block_device	*s_bdev;
 	struct backing_dev_info *s_bdi;
Index: linux-2.6/fs/dcache.c
===================================================================
--- linux-2.6.orig/fs/dcache.c
+++ linux-2.6/fs/dcache.c
@@ -43,8 +43,8 @@
  *   - i_dentry, d_alias, d_inode of aliases
  * dcache_hash_bucket lock protects:
  *   - the dcache hash table
- * dcache_lru_lock protects:
- *   - the dcache lru lists and counters
+ * sbz->s_dentry_lru_lock protects:
+ *   - the per-sb x per-zone dcache lru lists and counters
  * d_lock protects:
  *   - d_flags
  *   - d_name
@@ -58,7 +58,7 @@
  * Ordering:
  * dentry->d_inode->i_lock
  *   dentry->d_lock
- *     dcache_lru_lock
+ *     sbz->s_dentry_lru_lock
  *     dcache_hash_bucket lock
  *
  * If there is an ancestor relationship:
@@ -75,7 +75,6 @@
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(rename_lock);
@@ -186,51 +185,55 @@ static void dentry_iput(struct dentry *
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-	spin_lock(&dcache_lru_lock);
-	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-	dentry->d_sb->s_nr_dentry_unused++;
-	dentry_stat.nr_unused++;
-	spin_unlock(&dcache_lru_lock);
+	struct sb_zoneinfo *sbz = sb_zone_info(dentry->d_sb,
+					page_zone(virt_to_page(dentry)));
+	spin_lock(&sbz->s_dentry_lru_lock);
+	list_add(&dentry->d_lru, &sbz->s_dentry_lru);
+	sbz->s_nr_dentry_unused++;
+	spin_unlock(&sbz->s_dentry_lru_lock);
 }
 
 static void dentry_lru_add_tail(struct dentry *dentry)
 {
-	spin_lock(&dcache_lru_lock);
-	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-	dentry->d_sb->s_nr_dentry_unused++;
-	dentry_stat.nr_unused++;
-	spin_unlock(&dcache_lru_lock);
+	struct sb_zoneinfo *sbz = sb_zone_info(dentry->d_sb,
+					page_zone(virt_to_page(dentry)));
+	spin_lock(&sbz->s_dentry_lru_lock);
+	list_add_tail(&dentry->d_lru, &sbz->s_dentry_lru);
+	sbz->s_nr_dentry_unused++;
+	spin_unlock(&sbz->s_dentry_lru_lock);
 }
 
-static void __dentry_lru_del(struct dentry *dentry)
+static void __dentry_lru_del(struct sb_zoneinfo *sbz, struct dentry *dentry)
 {
 	list_del(&dentry->d_lru);
-	dentry->d_sb->s_nr_dentry_unused--;
-	dentry_stat.nr_unused--;
+	sbz->s_nr_dentry_unused--;
 }
 
-static void __dentry_lru_del_init(struct dentry *dentry)
+static void __dentry_lru_del_init(struct sb_zoneinfo *sbz,struct dentry *dentry)
 {
 	list_del_init(&dentry->d_lru);
-	dentry->d_sb->s_nr_dentry_unused--;
-	dentry_stat.nr_unused--;
+	sbz->s_nr_dentry_unused--;
 }
 
 static void dentry_lru_del(struct dentry *dentry)
 {
 	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dcache_lru_lock);
-		__dentry_lru_del(dentry);
-		spin_unlock(&dcache_lru_lock);
+		struct sb_zoneinfo *sbz = sb_zone_info(dentry->d_sb,
+					page_zone(virt_to_page(dentry)));
+		spin_lock(&sbz->s_dentry_lru_lock);
+		__dentry_lru_del(sbz, dentry);
+		spin_unlock(&sbz->s_dentry_lru_lock);
 	}
 }
 
 static void dentry_lru_del_init(struct dentry *dentry)
 {
 	if (likely(!list_empty(&dentry->d_lru))) {
-		spin_lock(&dcache_lru_lock);
-		__dentry_lru_del_init(dentry);
-		spin_unlock(&dcache_lru_lock);
+		struct sb_zoneinfo *sbz = sb_zone_info(dentry->d_sb,
+					page_zone(virt_to_page(dentry)));
+		spin_lock(&sbz->s_dentry_lru_lock);
+		__dentry_lru_del_init(sbz, dentry);
+		spin_unlock(&sbz->s_dentry_lru_lock);
 	}
 }
 
@@ -638,32 +641,33 @@ again:
  * which flags are set. This means we don't need to maintain multiple
  * similar copies of this loop.
  */
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void __shrink_dcache_sb_zone(struct super_block *sb,
+		struct sb_zoneinfo *sbz, unsigned long *count, int flags)
 {
 	LIST_HEAD(referenced);
 	LIST_HEAD(tmp);
 	struct dentry *dentry;
-	int cnt = 0;
+	unsigned long cnt = 0;
 
 	BUG_ON(!sb);
 	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
 	if (count != NULL)
 		/* called from prune_dcache() and shrink_dcache_parent() */
 		cnt = *count;
-relock:
-	spin_lock(&dcache_lru_lock);
 restart:
 	if (count == NULL)
-		list_splice_init(&sb->s_dentry_lru, &tmp);
+		list_splice_init(&sbz->s_dentry_lru, &tmp);
 	else {
-		while (!list_empty(&sb->s_dentry_lru)) {
-			dentry = list_entry(sb->s_dentry_lru.prev,
-					struct dentry, d_lru);
+		while (!list_empty(&sbz->s_dentry_lru)) {
+			dentry = list_entry(sbz->s_dentry_lru.prev,
+							struct dentry, d_lru);
 			BUG_ON(dentry->d_sb != sb);
 
 			if (!spin_trylock(&dentry->d_lock)) {
-				spin_unlock(&dcache_lru_lock);
-				goto relock;
+				spin_unlock(&sbz->s_dentry_lru_lock);
+				cpu_relax();
+				spin_lock(&sbz->s_dentry_lru_lock);
+				continue;
 			}
 			/*
 			 * If we are honouring the DCACHE_REFERENCED flag and
@@ -682,13 +686,10 @@ restart:
 				if (!cnt)
 					break;
 			}
-			cond_resched_lock(&dcache_lru_lock);
+			cond_resched_lock(&sbz->s_dentry_lru_lock);
 		}
 	}
-	spin_unlock(&dcache_lru_lock);
 
-again:
-	spin_lock(&dcache_lru_lock); /* lru_lock also protects tmp list */
 	while (!list_empty(&tmp)) {
 		struct inode *inode;
 
@@ -696,8 +697,10 @@ again:
 
 		if (!spin_trylock(&dentry->d_lock)) {
 again1:
-			spin_unlock(&dcache_lru_lock);
-			goto again;
+			spin_unlock(&sbz->s_dentry_lru_lock);
+			cpu_relax();
+			spin_lock(&sbz->s_dentry_lru_lock);
+			continue;
 		}
 		/*
 		 * We found an inuse dentry which was not removed from
@@ -705,7 +708,7 @@ again1:
 		 * it - just keep it off the LRU list.
 		 */
 		if (dentry->d_count) {
-			__dentry_lru_del_init(dentry);
+			__dentry_lru_del_init(sbz, dentry);
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
@@ -722,21 +725,33 @@ again2:
 				goto again2;
 			}
 		}
-		__dentry_lru_del_init(dentry);
-		spin_unlock(&dcache_lru_lock);
+		__dentry_lru_del_init(sbz, dentry);
+		spin_unlock(&sbz->s_dentry_lru_lock);
 
 		prune_one_dentry(dentry);
+		cond_resched();
 		/* dentry->d_lock dropped */
-		spin_lock(&dcache_lru_lock);
+		spin_lock(&sbz->s_dentry_lru_lock);
 	}
 
-	if (count == NULL && !list_empty(&sb->s_dentry_lru))
+	if (count == NULL && !list_empty(&sbz->s_dentry_lru))
 		goto restart;
 	if (count != NULL)
 		*count = cnt;
 	if (!list_empty(&referenced))
-		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&dcache_lru_lock);
+		list_splice(&referenced, &sbz->s_dentry_lru);
+}
+
+static void __shrink_dcache_sb(struct super_block *sb, unsigned long *count, int flags)
+{
+	struct zone *zone;
+	for_each_zone(zone) {
+		struct sb_zoneinfo *sbz = sb_zone_info(sb, zone);
+
+		spin_lock(&sbz->s_dentry_lru_lock);
+		__shrink_dcache_sb_zone(sb, sbz, count, flags);
+		spin_unlock(&sbz->s_dentry_lru_lock);
+	}
 }
 
 /**
@@ -749,31 +764,29 @@ again2:
  * This function may fail to free any resources if all the dentries are in use.
  */
 static void prune_dcache(struct zone *zone, unsigned long scanned,
-			unsigned long total, gfp_t gfp_mask)
-
+					unsigned long total, gfp_t gfp_mask)
 {
-	unsigned long nr_to_scan;
 	struct super_block *sb, *n;
-	int w_count;
-	int prune_ratio;
-	int count, pruned;
 
-	shrinker_add_scan(&nr_to_scan, scanned, total, dentry_stat.nr_unused,
-			DEFAULT_SEEKS * sysctl_vfs_cache_pressure / 100);
-done:
-	count = shrinker_do_scan(&nr_to_scan, SHRINK_BATCH);
-	if (dentry_stat.nr_unused == 0 || count == 0)
-		return;
-	if (count >= dentry_stat.nr_unused)
-		prune_ratio = 1;
-	else
-		prune_ratio = dentry_stat.nr_unused / count;
 	spin_lock(&sb_lock);
 	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+		struct sb_zoneinfo *sbz = sb_zone_info(sb, zone);
+		unsigned long nr;
+
 		if (list_empty(&sb->s_instances))
 			continue;
-		if (sb->s_nr_dentry_unused == 0)
+		if (sbz->s_nr_dentry_unused == 0)
+			continue;
+
+		shrinker_add_scan(&sbz->s_nr_dentry_scan, scanned, total,
+						sbz->s_nr_dentry_unused,
+			DEFAULT_SEEKS * sysctl_vfs_cache_pressure / 100);
+		if (!(gfp_mask & __GFP_FS))
+			continue;
+		nr = ACCESS_ONCE(sbz->s_nr_dentry_scan);
+		if (nr < SHRINK_BATCH)
 			continue;
+
 		sb->s_count++;
 		/* Now, we reclaim unused dentries with fairness.
 		 * We reclaim the same percentage from each superblock.
@@ -785,11 +798,8 @@ done:
 		 * number of dentries in the machine)
 		 */
 		spin_unlock(&sb_lock);
-		if (prune_ratio != 1)
-			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
-		else
-			w_count = sb->s_nr_dentry_unused;
-		pruned = w_count;
+
+
 		/*
 		 * We need to be sure this filesystem isn't being unmounted,
 		 * otherwise we could race with generic_shutdown_super(), and
@@ -798,28 +808,24 @@ done:
 		 * s_root isn't NULL.
 		 */
 		if (down_read_trylock(&sb->s_umount)) {
-			if ((sb->s_root != NULL) &&
-			    (!list_empty(&sb->s_dentry_lru))) {
-				__shrink_dcache_sb(sb, &w_count,
-						DCACHE_REFERENCED);
-				pruned -= w_count;
+			spin_lock(&sbz->s_dentry_lru_lock);
+			if (sb->s_root != NULL &&
+					!list_empty(&sbz->s_dentry_lru)) {
+				count_vm_events(SLABS_SCANNED, nr);
+				sbz->s_nr_dentry_scan = 0;
+				__shrink_dcache_sb_zone(sb, sbz,
+					&nr, DCACHE_REFERENCED);
+				sbz->s_nr_dentry_scan += nr;
 			}
+			spin_unlock(&sbz->s_dentry_lru_lock);
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
 		/* lock was dropped, must reset next */
 		list_safe_reset_next(sb, n, s_list);
-		count -= pruned;
 		__put_super(sb);
-		/* more work left to do? */
-		if (count <= 0)
-			break;
 	}
 	spin_unlock(&sb_lock);
-	if (count <= 0) {
-		cond_resched();
-		goto done;
-	}
 }
 
 /**
@@ -1167,8 +1173,9 @@ out:
 void shrink_dcache_parent(struct dentry * parent)
 {
 	struct super_block *sb = parent->d_sb;
-	int found;
+	unsigned long found;
 
+	/* doesn't work well anymore :( */
 	while ((found = select_parent(parent)) != 0)
 		__shrink_dcache_sb(sb, &found, 0);
 }
@@ -1189,7 +1196,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
 static int shrink_dcache_memory(struct zone *zone, unsigned long scanned,
 		unsigned long total, unsigned long global, gfp_t gfp_mask)
 {
-	prune_dcache(zone, scanned, global, gfp_mask);
+	prune_dcache(zone, scanned, total, gfp_mask);
 	return 0;
 }
 
Index: linux-2.6/fs/inode.c
===================================================================
--- linux-2.6.orig/fs/inode.c
+++ linux-2.6/fs/inode.c
@@ -35,7 +35,7 @@
  *   s_inodes, i_sb_list
  * inode_hash_bucket lock protects:
  *   inode hash table, i_hash
- * inode_lru_lock protects:
+ * zone->inode_lru_lock protects:
  *   inode_lru, i_lru
  * wb->b_lock protects:
  *   b_io, b_more_io, b_dirty, i_io, i_lru
@@ -51,7 +51,7 @@
  * inode_lock
  *   inode->i_lock
  *     inode_list_lglock
- *     inode_lru_lock
+ *     zone->inode_lru_lock
  *     wb->b_lock
  *     inode_hash_bucket lock
  */
@@ -102,8 +102,6 @@ static unsigned int i_hash_shift __read_
  * allowing for low-overhead inode sync() operations.
  */
 
-static LIST_HEAD(inode_lru);
-
 struct inode_hash_bucket {
 	struct hlist_bl_head head;
 };
@@ -129,8 +127,6 @@ static struct inode_hash_bucket *inode_h
 DECLARE_LGLOCK(inode_list_lglock);
 DEFINE_LGLOCK(inode_list_lglock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 /*
  * iprune_sem provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path.  Without this exclusion,
@@ -428,18 +424,22 @@ static void dispose_list(struct list_hea
 
 void __inode_lru_list_add(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
-	list_add(&inode->i_lru, &inode_lru);
-	inodes_stat.nr_unused++;
-	spin_unlock(&inode_lru_lock);
+	struct zone *z = page_zone(virt_to_page(inode));
+
+	spin_lock(&z->inode_lru_lock);
+	list_add(&inode->i_lru, &z->inode_lru);
+	z->inode_nr_lru++;
+	spin_unlock(&z->inode_lru_lock);
 }
 
 void __inode_lru_list_del(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	struct zone *z = page_zone(virt_to_page(inode));
+
+	spin_lock(&z->inode_lru_lock);
 	list_del_init(&inode->i_lru);
-	inodes_stat.nr_unused--;
-	spin_unlock(&inode_lru_lock);
+	z->inode_nr_lru--;
+	spin_unlock(&z->inode_lru_lock);
 }
 
 /*
@@ -464,10 +464,7 @@ static int invalidate_sb_inodes(struct s
 			list_del_init(&inode->i_io);
 			spin_unlock(&wb->b_lock);
 
-			spin_lock(&inode_lru_lock);
-			list_del(&inode->i_lru);
-			inodes_stat.nr_unused--;
-			spin_unlock(&inode_lru_lock);
+			__inode_lru_list_del(inode);
 
 			WARN_ON(inode->i_state & I_NEW);
 			inode->i_state |= I_FREEING;
@@ -534,55 +531,58 @@ static void prune_icache(struct zone *zo
 
 	down_read(&iprune_sem);
 again:
-	spin_lock(&inode_lru_lock);
+	spin_lock(&zone->inode_lru_lock);
 	for (; nr_to_scan; nr_to_scan--) {
 		struct inode *inode;
 
-		if (list_empty(&inode_lru))
+		if (list_empty(&zone->inode_lru))
 			break;
 
-		inode = list_entry(inode_lru.prev, struct inode, i_lru);
+		inode = list_entry(zone->inode_lru.prev, struct inode, i_lru);
 
 		if (!spin_trylock(&inode->i_lock)) {
-			spin_unlock(&inode_lru_lock);
+			spin_unlock(&zone->inode_lru_lock);
+			cpu_relax();
 			goto again;
 		}
 		if (inode->i_count || (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
 			spin_unlock(&inode->i_lock);
-			inodes_stat.nr_unused--;
+			zone->inode_nr_lru--;
 			continue;
 		}
 		if (inode->i_state) {
-			list_move(&inode->i_lru, &inode_lru);
+			list_move(&inode->i_lru, &zone->inode_lru);
 			inode->i_state &= ~I_REFERENCED;
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
-			list_move(&inode->i_lru, &inode_lru);
-			spin_unlock(&inode_lru_lock);
+			list_move(&inode->i_lru, &zone->inode_lru);
+			spin_unlock(&zone->inode_lru_lock);
 			__iget(inode);
 			spin_unlock(&inode->i_lock);
 
+			dispose_list(&freeable);
+
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lru_lock);
+			spin_lock(&zone->inode_lru_lock);
 			continue;
 		}
 		list_move(&inode->i_lru, &freeable);
 		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state |= I_FREEING;
 		spin_unlock(&inode->i_lock);
-		inodes_stat.nr_unused--;
+		zone->inode_nr_lru--;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&zone->inode_lru_lock);
 
 	dispose_list(&freeable);
 	up_read(&iprune_sem);
@@ -600,19 +600,24 @@ again:
 static int shrink_icache_memory(struct zone *zone, unsigned long scanned,
 		unsigned long total, unsigned long global, gfp_t gfp_mask)
 {
-	static unsigned long nr_to_scan;
 	unsigned long nr;
 
-	shrinker_add_scan(&nr_to_scan, scanned, global,
-			inodes_stat.nr_unused,
+	shrinker_add_scan(&zone->inode_nr_scan, scanned, total,
+					zone->inode_nr_lru,
 			DEFAULT_SEEKS * sysctl_vfs_cache_pressure / 100);
+	/*
+	 * Nasty deadlock avoidance.  We may hold various FS locks,
+	 * and we don't want to recurse into the FS that called us
+	 * in clear_inode() and friends..
+	 */
 	if (!(gfp_mask & __GFP_FS))
-	       return 0;
+		return 0;
 
-	while ((nr = shrinker_do_scan(&nr_to_scan, SHRINK_BATCH))) {
-		prune_icache(zone, nr);
-		cond_resched();
-	}
+	nr = ACCESS_ONCE(zone->inode_nr_scan);
+	if (nr < SHRINK_BATCH)
+		return 0;
+	zone->inode_nr_scan = 0;
+	prune_icache(zone, nr);
 
 	return 0;
 }
@@ -1431,12 +1436,8 @@ void generic_delete_inode(struct inode *
 {
 	const struct super_operations *op = inode->i_sb->s_op;
 
-	if (!list_empty(&inode->i_lru)) {
-		spin_lock(&inode_lru_lock);
-		list_del_init(&inode->i_lru);
-		inodes_stat.nr_unused--;
-		spin_unlock(&inode_lru_lock);
-	}
+	if (!list_empty(&inode->i_lru))
+		__inode_lru_list_del(inode);
 	if (!list_empty(&inode->i_io)) {
 		struct bdi_writeback *wb = inode_to_wb(inode);
 		spin_lock(&wb->b_lock);
@@ -1493,10 +1494,7 @@ int generic_detach_inode(struct inode *i
 			inode->i_state |= I_REFERENCED;
 			if (!(inode->i_state & (I_DIRTY|I_SYNC)) &&
 					list_empty(&inode->i_lru)) {
-				spin_lock(&inode_lru_lock);
-				list_add(&inode->i_lru, &inode_lru);
-				inodes_stat.nr_unused++;
-				spin_unlock(&inode_lru_lock);
+				__inode_lru_list_add(inode);
 			}
 			spin_unlock(&inode->i_lock);
 			return 0;
@@ -1510,12 +1508,8 @@ int generic_detach_inode(struct inode *i
 		inode->i_state &= ~I_WILL_FREE;
 		__remove_inode_hash(inode);
 	}
-	if (!list_empty(&inode->i_lru)) {
-		spin_lock(&inode_lru_lock);
-		list_del_init(&inode->i_lru);
-		inodes_stat.nr_unused--;
-		spin_unlock(&inode_lru_lock);
-	}
+	if (!list_empty(&inode->i_lru))
+		__inode_lru_list_del(inode);
 	if (!list_empty(&inode->i_io)) {
 		struct bdi_writeback *wb = inode_to_wb(inode);
 		spin_lock(&wb->b_lock);
@@ -1831,6 +1825,7 @@ void __init inode_init_early(void)
 void __init inode_init(void)
 {
 	int loop;
+	struct zone *zone;
 
 	percpu_counter_init(&nr_inodes, 0);
 	/* inode slab cache */
@@ -1840,6 +1835,12 @@ void __init inode_init(void)
 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
 					 SLAB_MEM_SPREAD),
 					 init_once);
+	for_each_zone(zone) {
+		spin_lock_init(&zone->inode_lru_lock);
+		INIT_LIST_HEAD(&zone->inode_lru);
+		zone->inode_nr_lru = 0;
+		zone->inode_nr_scan = 0;
+	}
 	register_shrinker(&icache_shrinker);
 
 	lg_lock_init(inode_list_lglock);
Index: linux-2.6/fs/super.c
===================================================================
--- linux-2.6.orig/fs/super.c
+++ linux-2.6/fs/super.c
@@ -50,6 +50,8 @@ static struct super_block *alloc_super(s
 	static const struct super_operations default_op;
 
 	if (s) {
+		struct zone *zone;
+
 		if (security_sb_alloc(s)) {
 			kfree(s);
 			s = NULL;
@@ -88,7 +90,6 @@ static struct super_block *alloc_super(s
 #endif
 		INIT_LIST_HEAD(&s->s_instances);
 		INIT_HLIST_BL_HEAD(&s->s_anon);
-		INIT_LIST_HEAD(&s->s_dentry_lru);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -125,6 +126,15 @@ static struct super_block *alloc_super(s
 		s->s_maxbytes = MAX_NON_LFS;
 		s->s_op = &default_op;
 		s->s_time_gran = 1000000000;
+
+		for_each_zone(zone) {
+			struct sb_zoneinfo *sbz = sb_zone_info(s, zone);
+
+			spin_lock_init(&sbz->s_dentry_lru_lock);
+			INIT_LIST_HEAD(&sbz->s_dentry_lru);
+			sbz->s_nr_dentry_scan = 0;
+			sbz->s_nr_dentry_unused = 0;
+		}
 	}
 out:
 	return s;
Index: linux-2.6/include/linux/mmzone.h
===================================================================
--- linux-2.6.orig/include/linux/mmzone.h
+++ linux-2.6/include/linux/mmzone.h
@@ -370,6 +370,13 @@ struct zone {
 
 
 	ZONE_PADDING(_pad2_)
+
+	spinlock_t inode_lru_lock;
+	struct list_head inode_lru;
+	unsigned long inode_nr_lru;
+	unsigned long inode_nr_scan;
+
+	ZONE_PADDING(_pad3_)
 	/* Rarely used or read-mostly fields */
 
 	/*
Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -1004,7 +1004,8 @@ struct shrinker {
 	/* These are for internal use */
 	struct list_head list;
 };
-#define DEFAULT_SEEKS	(128UL*2) /* A good number if you don't know better. */
+#define SHRINK_FIXED	(128UL)	/* Fixed point for shrinker ratio */
+#define DEFAULT_SEEKS	(SHRINK_FIXED*2) /* A good number if you don't know better. */
 #define SHRINK_BATCH	128	/* A good number if you don't know better */
 extern void register_shrinker(struct shrinker *);
 extern void unregister_shrinker(struct shrinker *);




Thread overview: 152+ messages
2010-06-24  3:02 [patch 00/52] vfs scalability patches updated npiggin
2010-06-24  3:02 ` [patch 01/52] kernel: add bl_list npiggin
2010-06-24  6:04   ` Eric Dumazet
2010-06-24 14:42     ` Nick Piggin
2010-06-24 16:01       ` Eric Dumazet
2010-06-28 21:37   ` Paul E. McKenney
2010-06-29  6:30     ` Nick Piggin
2010-06-24  3:02 ` [patch 02/52] fs: fix superblock iteration race npiggin
2010-06-29 13:02   ` Christoph Hellwig
2010-06-29 14:56     ` Nick Piggin
2010-06-29 17:35       ` Linus Torvalds
2010-06-29 17:41         ` Nick Piggin
2010-06-29 17:52           ` Linus Torvalds
2010-06-29 17:58             ` Linus Torvalds
2010-06-29 20:04               ` Chris Clayton
2010-06-29 20:14                 ` Nick Piggin
2010-06-29 20:38                   ` Chris Clayton
2010-06-30  7:13                     ` Chris Clayton
2010-06-30 12:51               ` Al Viro
2010-06-24  3:02 ` [patch 03/52] fs: fs_struct rwlock to spinlock npiggin
2010-06-24  3:02 ` [patch 04/52] fs: cleanup files_lock npiggin
2010-06-24  3:02 ` [patch 05/52] lglock: introduce special lglock and brlock spin locks npiggin
2010-06-24 18:15   ` Thomas Gleixner
2010-06-25  6:22     ` Nick Piggin
2010-06-25  9:50       ` Thomas Gleixner
2010-06-25 10:11         ` Nick Piggin
2010-06-24  3:02 ` [patch 06/52] fs: scale files_lock npiggin
2010-06-24  7:52   ` Peter Zijlstra
2010-06-24 15:00     ` Nick Piggin
2010-06-24  3:02 ` [patch 07/52] fs: brlock vfsmount_lock npiggin
2010-06-24  3:02 ` [patch 08/52] fs: scale mntget/mntput npiggin
2010-06-24  3:02 ` [patch 09/52] fs: dcache scale hash npiggin
2010-06-24  3:02 ` [patch 10/52] fs: dcache scale lru npiggin
2010-06-24  3:02 ` [patch 11/52] fs: dcache scale nr_dentry npiggin
2010-06-24  3:02 ` [patch 12/52] fs: dcache scale dentry refcount npiggin
2010-06-24  3:02 ` [patch 13/52] fs: dcache scale d_unhashed npiggin
2010-06-24  3:02 ` [patch 14/52] fs: dcache scale subdirs npiggin
2010-06-24  7:56   ` Peter Zijlstra
2010-06-24  9:50   ` Andi Kleen
2010-06-24 15:53     ` Nick Piggin
2010-06-24  3:02 ` [patch 15/52] fs: dcache scale inode alias list npiggin
2010-06-24  3:02 ` [patch 16/52] fs: dcache RCU for multi-step operaitons npiggin
2010-06-24  7:58   ` Peter Zijlstra
2010-06-24 15:03     ` Nick Piggin
2010-06-24 17:22       ` john stultz
2010-06-24 17:26   ` john stultz
2010-06-25  6:45     ` Nick Piggin
2010-06-24  3:02 ` [patch 17/52] fs: dcache remove dcache_lock npiggin
2010-06-24  3:02 ` [patch 18/52] fs: dcache reduce dput locking npiggin
2010-06-24  3:02 ` [patch 19/52] fs: dcache per-bucket dcache hash locking npiggin
2010-06-24  3:02 ` [patch 20/52] fs: dcache reduce dcache_inode_lock npiggin
2010-06-24  3:02 ` [patch 21/52] fs: dcache per-inode inode alias locking npiggin
2010-06-24  3:02 ` [patch 22/52] fs: dcache rationalise dget variants npiggin
2010-06-24  3:02 ` [patch 23/52] fs: dcache percpu nr_dentry npiggin
2010-06-24  3:02 ` [patch 24/52] fs: dcache reduce d_parent locking npiggin
2010-06-24  8:44   ` Peter Zijlstra
2010-06-24 15:07     ` Nick Piggin
2010-06-24 15:32       ` Paul E. McKenney
2010-06-24 16:05         ` Nick Piggin
2010-06-24 16:41           ` Paul E. McKenney
2010-06-28 21:50   ` Paul E. McKenney
2010-07-07 14:35     ` Nick Piggin
2010-06-24  3:02 ` [patch 25/52] fs: dcache DCACHE_REFERENCED improve npiggin
2010-06-24  3:02 ` [patch 26/52] fs: icache lock s_inodes list npiggin
2010-06-24  3:02 ` [patch 27/52] fs: icache lock inode hash npiggin
2010-06-24  3:02 ` [patch 28/52] fs: icache lock i_state npiggin
2010-06-24  3:02 ` [patch 29/52] fs: icache lock i_count npiggin
2010-06-30  7:27   ` Dave Chinner
2010-06-30 12:05     ` Nick Piggin
2010-07-01  2:36       ` Dave Chinner
2010-07-01  7:54         ` Nick Piggin
2010-07-01  9:36           ` Nick Piggin
2010-07-01 16:21           ` Frank Mayhar
2010-07-03  2:03       ` Andrew Morton
2010-07-03  3:41         ` Nick Piggin
2010-07-03  4:31           ` Andrew Morton
2010-07-03  5:06             ` Nick Piggin
2010-07-03  5:18               ` Nick Piggin
2010-07-05 22:41               ` Dave Chinner
2010-07-06  4:34                 ` Nick Piggin
2010-07-06 10:38                   ` Theodore Tso
2010-07-06 13:04                     ` Nick Piggin
2010-07-07 17:00                     ` Frank Mayhar
2010-06-24  3:02 ` [patch 30/52] fs: icache lock lru/writeback lists npiggin
2010-06-24  8:58   ` Peter Zijlstra
2010-06-24 15:09     ` Nick Piggin
2010-06-24 15:13       ` Peter Zijlstra
2010-06-24  3:02 ` [patch 31/52] fs: icache atomic inodes_stat npiggin
2010-06-24  3:02 ` [patch 32/52] fs: icache protect inode state npiggin
2010-06-24  3:02 ` [patch 33/52] fs: icache atomic last_ino, iunique lock npiggin
2010-06-24  3:02 ` [patch 34/52] fs: icache remove inode_lock npiggin
2010-06-24  3:02 ` [patch 35/52] fs: icache factor hash lock into functions npiggin
2010-06-24  3:02 ` [patch 36/52] fs: icache per-bucket inode hash locks npiggin
2010-06-24  3:02 ` [patch 37/52] fs: icache lazy lru npiggin
2010-06-24  9:52   ` Andi Kleen
2010-06-24 15:59     ` Nick Piggin
2010-06-30  8:38   ` Dave Chinner
2010-06-30 12:06     ` Nick Piggin
2010-07-01  2:46       ` Dave Chinner
2010-07-01  7:57         ` Nick Piggin
2010-06-24  3:02 ` [patch 38/52] fs: icache RCU free inodes npiggin
2010-06-30  8:57   ` Dave Chinner
2010-06-30 12:07     ` Nick Piggin
2010-06-24  3:02 ` [patch 39/52] fs: icache rcu walk for i_sb_list npiggin
2010-06-24  3:02 ` [patch 40/52] fs: dcache improve scalability of pseudo filesystems npiggin
2010-06-24  3:02 ` [patch 41/52] fs: icache reduce atomics npiggin
2010-06-24  3:02 ` [patch 42/52] fs: icache per-cpu last_ino allocator npiggin
2010-06-24  9:48   ` Andi Kleen
2010-06-24 15:52     ` Nick Piggin
2010-06-24 16:19       ` Andi Kleen
2010-06-24 16:38         ` Nick Piggin
2010-06-24  3:02 ` [patch 43/52] fs: icache per-cpu nr_inodes counter npiggin
2010-06-24  3:02 ` [patch 44/52] fs: icache per-CPU sb inode lists and locks npiggin
2010-06-30  9:26   ` Dave Chinner
2010-06-30 12:08     ` Nick Piggin
2010-07-01  3:12       ` Dave Chinner
2010-07-01  8:00         ` Nick Piggin
2010-06-24  3:02 ` [patch 45/52] fs: icache RCU hash lookups npiggin
2010-06-24  3:02 ` [patch 46/52] fs: icache reduce locking npiggin
2010-06-24  3:02 ` [patch 47/52] fs: keep inode with backing-dev npiggin
2010-06-24  3:03 ` [patch 48/52] fs: icache split IO and LRU lists npiggin
2010-06-24  3:03 ` [patch 49/52] fs: icache scale writeback list locking npiggin
2010-06-24  3:03 ` [patch 50/52] mm: implement per-zone shrinker npiggin
2010-06-24 10:06   ` Andi Kleen
2010-06-24 16:00     ` Nick Piggin
2010-06-24 16:27       ` Andi Kleen
2010-06-24 16:32         ` Andi Kleen
2010-06-24 16:37         ` Andi Kleen
2010-06-30  6:28   ` Dave Chinner
2010-06-30 12:03     ` Nick Piggin
2010-06-24  3:03 ` npiggin [this message]
2010-06-30 10:09   ` [patch 51/52] fs: per-zone dentry and inode LRU Dave Chinner
2010-06-30 12:13     ` Nick Piggin
2010-06-24  3:03 ` [patch 52/52] fs: icache less I_FREEING time npiggin
2010-06-30 10:13   ` Dave Chinner
2010-06-30 12:14     ` Nick Piggin
2010-07-01  3:33       ` Dave Chinner
2010-07-01  8:06         ` Nick Piggin
2010-06-25  7:12 ` [patch 00/52] vfs scalability patches updated Christoph Hellwig
2010-06-25  8:05   ` Nick Piggin
2010-06-30 11:30 ` Dave Chinner
2010-06-30 12:40   ` Nick Piggin
2010-07-01  3:56     ` Dave Chinner
2010-07-01  8:20       ` Nick Piggin
2010-07-01 17:36       ` Andi Kleen
2010-07-01 17:23     ` Nick Piggin
2010-07-01 17:28       ` Andi Kleen
2010-07-06 17:49       ` Nick Piggin
2010-07-01 17:35     ` Linus Torvalds
2010-07-01 17:52       ` Nick Piggin
2010-07-02  4:01       ` Paul E. McKenney
2010-06-30 17:08   ` Frank Mayhar
