linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] f2fs: check the largest extent at look-up time
@ 2015-07-01  1:25 Jaegeuk Kim
  2015-07-01  1:25 ` [PATCH 2/2] f2fs: shrink unreferenced extent_caches first Jaegeuk Kim
  2015-07-02 12:38 ` [f2fs-dev] [PATCH 1/2] f2fs: check the largest extent at look-up time Chao Yu
  0 siblings, 2 replies; 10+ messages in thread
From: Jaegeuk Kim @ 2015-07-01  1:25 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

Because of the extent shrinker or other -ENOMEM scenarios, we cannot guarantee
that the largest extent is kept as a node in the tree all the time.

Instead of relying on the tree lookup alone, simply check the largest extent
cached in the extent tree first.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/data.c              | 16 ++++++++++++++--
 include/trace/events/f2fs.h | 12 ++++++------
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b6c28bb..8f059e0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -511,12 +511,22 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 	struct extent_node *en;
+	bool ret = false;
 
 	f2fs_bug_on(sbi, !et);
 
 	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
 
 	read_lock(&et->lock);
+
+	if (et->largest.fofs <= pgofs &&
+			et->largest.fofs + et->largest.len > pgofs) {
+		*ei = et->largest;
+		ret = true;
+		stat_inc_read_hit(sbi->sb);
+		goto out;
+	}
+
 	en = __lookup_extent_tree(et, pgofs);
 	if (en) {
 		*ei = en->ei;
@@ -525,13 +535,15 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 			list_move_tail(&en->list, &sbi->extent_list);
 		et->cached_en = en;
 		spin_unlock(&sbi->extent_lock);
+		ret = true;
 		stat_inc_read_hit(sbi->sb);
 	}
+out:
 	stat_inc_total_hit(sbi->sb);
 	read_unlock(&et->lock);
 
-	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
-	return en ? true : false;
+	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+	return ret;
 }
 
 /* return true, if on-disk extent should be updated */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2..a019465 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
 TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
 
 	TP_PROTO(struct inode *inode, unsigned int pgofs,
-						struct extent_node *en),
+						struct extent_info *ei),
 
-	TP_ARGS(inode, pgofs, en),
+	TP_ARGS(inode, pgofs, ei),
 
-	TP_CONDITION(en),
+	TP_CONDITION(ei),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
@@ -1118,9 +1118,9 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
 		__entry->dev = inode->i_sb->s_dev;
 		__entry->ino = inode->i_ino;
 		__entry->pgofs = pgofs;
-		__entry->fofs = en->ei.fofs;
-		__entry->blk = en->ei.blk;
-		__entry->len = en->ei.len;
+		__entry->fofs = ei->fofs;
+		__entry->blk = ei->blk;
+		__entry->len = ei->len;
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
-- 
2.1.1



* [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-01  1:25 [PATCH 1/2] f2fs: check the largest extent at look-up time Jaegeuk Kim
@ 2015-07-01  1:25 ` Jaegeuk Kim
  2015-07-02 12:39   ` [f2fs-dev] " Chao Yu
  2015-07-02 12:38 ` [f2fs-dev] [PATCH 1/2] f2fs: check the largest extent at look-up time Chao Yu
  1 sibling, 1 reply; 10+ messages in thread
From: Jaegeuk Kim @ 2015-07-01  1:25 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

If an extent_tree entry has a zero reference count, we can drop it from the
cache with higher priority than entries that are still referenced.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 10 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8f059e0..a0a0e2b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct radix_tree_root *root = &sbi->extent_tree_root;
 	unsigned int found;
 	unsigned int node_cnt = 0, tree_cnt = 0;
+	int remained;
 
 	if (!test_opt(sbi, EXTENT_CACHE))
 		return 0;
 
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	/* 1. remove unreferenced extent tree */
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			if (!atomic_read(&et->refcount)) {
+				write_lock(&et->lock);
+				node_cnt += __free_extent_tree(sbi, et, true);
+				write_unlock(&et->lock);
+
+				radix_tree_delete(root, et->ino);
+				kmem_cache_free(extent_tree_slab, et);
+				sbi->total_ext_tree--;
+				tree_cnt++;
+
+				if (node_cnt + tree_cnt >= nr_shrink)
+					goto unlock_out;
+			}
+		}
+	}
+	up_write(&sbi->extent_tree_lock);
+
+	/* 2. remove LRU extent entries */
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	remained = nr_shrink - (node_cnt + tree_cnt);
+
 	spin_lock(&sbi->extent_lock);
 	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-		if (!nr_shrink--)
+		if (!remained--)
 			break;
 		list_del_init(&en->list);
 	}
 	spin_unlock(&sbi->extent_lock);
 
-	if (!down_write_trylock(&sbi->extent_tree_lock))
-		goto out;
-
 	while ((found = radix_tree_gang_lookup(root,
 				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
 		unsigned i;
@@ -687,14 +720,12 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 			write_lock(&et->lock);
 			node_cnt += __free_extent_tree(sbi, et, false);
 			write_unlock(&et->lock);
-			if (!atomic_read(&et->refcount) && !et->count) {
-				radix_tree_delete(root, et->ino);
-				kmem_cache_free(extent_tree_slab, et);
-				sbi->total_ext_tree--;
-				tree_cnt++;
-			}
+
+			if (node_cnt + tree_cnt >= nr_shrink)
+				break;
 		}
 	}
+unlock_out:
 	up_write(&sbi->extent_tree_lock);
 out:
 	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
-- 
2.1.1



* RE: [f2fs-dev] [PATCH 1/2] f2fs: check the largest extent at look-up time
  2015-07-01  1:25 [PATCH 1/2] f2fs: check the largest extent at look-up time Jaegeuk Kim
  2015-07-01  1:25 ` [PATCH 2/2] f2fs: shrink unreferenced extent_caches first Jaegeuk Kim
@ 2015-07-02 12:38 ` Chao Yu
  1 sibling, 0 replies; 10+ messages in thread
From: Chao Yu @ 2015-07-02 12:38 UTC (permalink / raw)
  To: 'Jaegeuk Kim'; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> Sent: Wednesday, July 01, 2015 9:26 AM
> To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> Cc: Jaegeuk Kim; Jaegeuk Kim
> Subject: [f2fs-dev] [PATCH 1/2] f2fs: check the largest extent at look-up time
> 
> Because of the extent shrinker or other -ENOMEM scenarios, it cannot guarantee
> that the largest extent would be cached in the tree all the time.
> 
> Instead of relying on extent_tree, we can simply check the cached one in extent
> tree accordingly.
> 
> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Reviewed-by: Chao Yu <chao2.yu@samsung.com>


* RE: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-01  1:25 ` [PATCH 2/2] f2fs: shrink unreferenced extent_caches first Jaegeuk Kim
@ 2015-07-02 12:39   ` Chao Yu
  2015-07-04  6:29     ` Jaegeuk Kim
  0 siblings, 1 reply; 10+ messages in thread
From: Chao Yu @ 2015-07-02 12:39 UTC (permalink / raw)
  To: 'Jaegeuk Kim'; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> Sent: Wednesday, July 01, 2015 9:26 AM
> To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> Cc: Jaegeuk Kim; Jaegeuk Kim
> Subject: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> 
> If an extent_tree entry has a zero reference count, we can drop it from the
> cache in higher priority rather than currently referencing entries.
> 
> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> ---
>  fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 41 insertions(+), 10 deletions(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 8f059e0..a0a0e2b 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int
> nr_shrink)
>  	struct radix_tree_root *root = &sbi->extent_tree_root;
>  	unsigned int found;
>  	unsigned int node_cnt = 0, tree_cnt = 0;
> +	int remained;
> 
>  	if (!test_opt(sbi, EXTENT_CACHE))
>  		return 0;
> 
> +	if (!down_write_trylock(&sbi->extent_tree_lock))
> +		goto out;
> +
> +	/* 1. remove unreferenced extent tree */

We always release extent nodes and trees starting from the inode with the
lowest ino; why not a random ino?

Also, this releasing step breaks the LRU ordering.

Some unreferenced files have a high hit ratio and some referenced files may
have a low hit ratio. Why not release the low-hit-ratio extent trees first?

Thanks,



* Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-02 12:39   ` [f2fs-dev] " Chao Yu
@ 2015-07-04  6:29     ` Jaegeuk Kim
  2015-07-06 12:25       ` Chao Yu
  0 siblings, 1 reply; 10+ messages in thread
From: Jaegeuk Kim @ 2015-07-04  6:29 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

On Thu, Jul 02, 2015 at 08:39:43PM +0800, Chao Yu wrote:
> > -----Original Message-----
> > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > Sent: Wednesday, July 01, 2015 9:26 AM
> > To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> > linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> > linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> > Cc: Jaegeuk Kim; Jaegeuk Kim
> > Subject: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > 
> > If an extent_tree entry has a zero reference count, we can drop it from the
> > cache in higher priority rather than currently referencing entries.
> > 
> > Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> > ---
> >  fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
> >  1 file changed, 41 insertions(+), 10 deletions(-)
> > 
> > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > index 8f059e0..a0a0e2b 100644
> > --- a/fs/f2fs/data.c
> > +++ b/fs/f2fs/data.c
> > @@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int
> > nr_shrink)
> >  	struct radix_tree_root *root = &sbi->extent_tree_root;
> >  	unsigned int found;
> >  	unsigned int node_cnt = 0, tree_cnt = 0;
> > +	int remained;
> > 
> >  	if (!test_opt(sbi, EXTENT_CACHE))
> >  		return 0;
> > 
> > +	if (!down_write_trylock(&sbi->extent_tree_lock))
> > +		goto out;
> > +
> > +	/* 1. remove unreferenced extent tree */
> 
> We always release extent node and tree from inode with lowest ino,
> why not random ino?

Here, what I want to do is:
1. first drop the extent trees of unreferenced inodes;
   those inodes were already evicted,
2. then drop extent nodes in LRU order.

You're right. We don't need to drop the tree with the lowest ino first.
Actually, I was planning to add an LRU list for extent_trees.
But do we really need to take care of that order for already evicted inodes?

Here, we should think about two types of hit ratios.
One is for inodes, and the other is for data.
The VFS maintains the inode cache in LRU order, while data is cached via the
page cache, which is also controlled by LRU. And, whenever an inode is evicted,
the VFS drops all of its cached data.
So, I believe we should give a higher priority to inodes rather than data.

And, in order to increase the hit ratio, we try to keep an extent tree and its
nodes even after the corresponding inode is evicted.
So, I concluded that the dropping order is not critical, especially for
already evicted inodes.
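
To make the intended order concrete, here is a condensed sketch. It is not the
exact patch code: locking and the radix_tree_gang_lookup() batching are folded
into a hypothetical for_each_extent_tree() iterator, and
shrink_lru_extent_nodes() is just a stand-in for pass 2 of the diff above.

	unsigned int node_cnt = 0, tree_cnt = 0;

	/* pass 1: whole trees whose inodes were already evicted go first */
	for_each_extent_tree(sbi, et) {			/* hypothetical iterator */
		if (atomic_read(&et->refcount))
			continue;			/* still referenced, keep it */
		node_cnt += __free_extent_tree(sbi, et, true);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		sbi->total_ext_tree--;
		tree_cnt++;
		if (node_cnt + tree_cnt >= nr_shrink)
			break;				/* budget spent */
	}

	/* pass 2: spend any remaining budget on LRU-ordered extent nodes,
	 * detaching nodes while keeping the still-referenced trees alive
	 */
	if (node_cnt + tree_cnt < nr_shrink)
		shrink_lru_extent_nodes(sbi, nr_shrink - (node_cnt + tree_cnt));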

> 
> And this step releasing breaks the rule of lru runs.
> 
> Some unreferenced file has high hit ratio and some referenced file may
> have low hit ratio. Why not release low hit ratio extent tree at first?

But the user still keeps the referenced file open for further access, right?
And the unreferenced file was already evicted by the VFS.

Thanks,


* RE: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-04  6:29     ` Jaegeuk Kim
@ 2015-07-06 12:25       ` Chao Yu
  2015-07-06 22:16         ` Jaegeuk Kim
  0 siblings, 1 reply; 10+ messages in thread
From: Chao Yu @ 2015-07-06 12:25 UTC (permalink / raw)
  To: 'Jaegeuk Kim'; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> Sent: Saturday, July 04, 2015 2:29 PM
> To: Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org;
> linux-f2fs-devel@lists.sourceforge.net
> Subject: Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> 
> On Thu, Jul 02, 2015 at 08:39:43PM +0800, Chao Yu wrote:
> > > -----Original Message-----
> > > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > > Sent: Wednesday, July 01, 2015 9:26 AM
> > > To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> > > linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> > > linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> > > Cc: Jaegeuk Kim; Jaegeuk Kim
> > > Subject: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > >
> > > If an extent_tree entry has a zero reference count, we can drop it from the
> > > cache in higher priority rather than currently referencing entries.
> > >
> > > Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> > > ---
> > >  fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
> > >  1 file changed, 41 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > > index 8f059e0..a0a0e2b 100644
> > > --- a/fs/f2fs/data.c
> > > +++ b/fs/f2fs/data.c
> > > @@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int
> > > nr_shrink)
> > >  	struct radix_tree_root *root = &sbi->extent_tree_root;
> > >  	unsigned int found;
> > >  	unsigned int node_cnt = 0, tree_cnt = 0;
> > > +	int remained;
> > >
> > >  	if (!test_opt(sbi, EXTENT_CACHE))
> > >  		return 0;
> > >
> > > +	if (!down_write_trylock(&sbi->extent_tree_lock))
> > > +		goto out;
> > > +
> > > +	/* 1. remove unreferenced extent tree */
> >
> > We always release extent node and tree from inode with lowest ino,
> > why not random ino?
> 
> Here what I want to do is to
> 1. drop unreferenced inodes' extent trees;
>    the inodes were already evicted before,
> 2. drop LRU ordered extent nodes.

Actually, I got it; your code shows this method very clearly. :)

> 
> You're right. We don't need to drop it having the lowest ino first.
> Actually, I was supposed to add an LRU list for extent_trees.
> But, do we need to really take care of its order for already evicted inodes?
> 
> Here, we should think about two types of hit ratios.
> One is for inodes, and the other is for data.
> The VFS maintains inode_cache in an LRU order, while its data is cached via
> page cache also conrolled by LRU. And, whenever inode is evicted, VFS drops
> all the cached data.
> So, I believe we should give a higher priority to inodes rather than data.
> 
> And, in order to increase the hit ratio, we're trying to keep an extent tree
> and its nodes even if its corresponding inode was evicted.
> So, I concluded that the dropping order would not be critical especially for
> the already evicted inodes.

From the perspective of shrinking memory size, I think you're completely right,
because we can regard the extent tree and its nodes as metadata of an inode: if
the VFS evicts an inode, all of its data and metadata, including the data in the
extent cache, should be evicted.

But from the perspective of raising the hit ratio of our cache, I'm not sure
this is the best way.

I guess with this method we may encounter lower coverage of the dnode + extent
cache, as well as a duplicate-cache issue, like:
a) all data of an inode is evicted by the VFS, and its tree and nodes in the
extent cache are evicted too, resulting in a lower hit ratio on further access;
b) the data and node cache of an inode still exist in the VFS, and its tree and
nodes in the extent cache exist too.

> 
> >
> > And this step releasing breaks the rule of lru runs.
> >
> > Some unreferenced file has high hit ratio and some referenced file may
> > have low hit ratio. Why not release low hit ratio extent tree at first?
> 
> But, still user has opened the referenced file to further access, right?

It depends on the access model.

What I mean is that if the extent cache of an inode has been hit many times,
we can assume the access pattern is re-access heavy; that means we can expect
this cache to be hit more times. Conversely, we can release it when necessary.

Thanks,

> And, the unreferenced file was evicted by VFS.
> 
> Thanks,
> 

* Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-06 12:25       ` Chao Yu
@ 2015-07-06 22:16         ` Jaegeuk Kim
  2015-07-08  9:41           ` Chao Yu
  0 siblings, 1 reply; 10+ messages in thread
From: Jaegeuk Kim @ 2015-07-06 22:16 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel

On Mon, Jul 06, 2015 at 08:25:23PM +0800, Chao Yu wrote:
> > -----Original Message-----
> > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > Sent: Saturday, July 04, 2015 2:29 PM
> > To: Chao Yu
> > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org;
> > linux-f2fs-devel@lists.sourceforge.net
> > Subject: Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > 
> > On Thu, Jul 02, 2015 at 08:39:43PM +0800, Chao Yu wrote:
> > > > -----Original Message-----
> > > > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > > > Sent: Wednesday, July 01, 2015 9:26 AM
> > > > To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> > > > linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> > > > linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> > > > Cc: Jaegeuk Kim; Jaegeuk Kim
> > > > Subject: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > > >
> > > > If an extent_tree entry has a zero reference count, we can drop it from the
> > > > cache in higher priority rather than currently referencing entries.
> > > >
> > > > Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> > > > ---
> > > >  fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
> > > >  1 file changed, 41 insertions(+), 10 deletions(-)
> > > >
> > > > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > > > index 8f059e0..a0a0e2b 100644
> > > > --- a/fs/f2fs/data.c
> > > > +++ b/fs/f2fs/data.c
> > > > @@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int
> > > > nr_shrink)
> > > >  	struct radix_tree_root *root = &sbi->extent_tree_root;
> > > >  	unsigned int found;
> > > >  	unsigned int node_cnt = 0, tree_cnt = 0;
> > > > +	int remained;
> > > >
> > > >  	if (!test_opt(sbi, EXTENT_CACHE))
> > > >  		return 0;
> > > >
> > > > +	if (!down_write_trylock(&sbi->extent_tree_lock))
> > > > +		goto out;
> > > > +
> > > > +	/* 1. remove unreferenced extent tree */
> > >
> > > We always release extent node and tree from inode with lowest ino,
> > > why not random ino?
> > 
> > Here what I want to do is to
> > 1. drop unreferenced inodes' extent trees;
> >    the inodes were already evicted before,
> > 2. drop LRU ordered extent nodes.
> 
> Actually, I got it, you codes show this method very clearly. :)
> 
> > 
> > You're right. We don't need to drop it having the lowest ino first.
> > Actually, I was supposed to add an LRU list for extent_trees.
> > But, do we need to really take care of its order for already evicted inodes?
> > 
> > Here, we should think about two types of hit ratios.
> > One is for inodes, and the other is for data.
> > The VFS maintains inode_cache in an LRU order, while its data is cached via
> > page cache also conrolled by LRU. And, whenever inode is evicted, VFS drops
> > all the cached data.
> > So, I believe we should give a higher priority to inodes rather than data.
> > 
> > And, in order to increase the hit ratio, we're trying to keep an extent tree
> > and its nodes even if its corresponding inode was evicted.
> > So, I concluded that the dropping order would not be critical especially for
> > the already evicted inodes.
> 
> >From perspective of shrinking memory size, I think you're completely right,
> because we can regard extent tree and its nodes as metadata of one inode, if
> VFS evict one inode, all its data and metadata include data in extent cache
> should be evicted.
> 
> But from perspective of arising hit ratio of our cache, I'm not sure this is
> the best way.
> 
> I guess in this method, we may encounter lower coverage area of dnode + extent
> cache and double caches exist issue, like:
> a) all data of inode is evicted by VFS, and its tree and nodes in extent cache
> is evicted too, resulting lower hit raito of further access.

Well, f2fs_evict_inode does not destroy the whole extent tree and its nodes
right away, but just drops the refcount of the extent tree. So, I expect that
this gives another chance of a cache hit for further data access.
Moreover, since this only matters under memory pressure, the unreferenced extent
trees and nodes will be kept around long enough in the normal situation.
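
Roughly, the lifetime rule looks like this (an illustrative sketch only, not
the exact f2fs code; the helper name is made up and locking is omitted):

	/* called from the eviction path: keep the cached extents, drop the ref */
	static void drop_extent_tree_ref(struct inode *inode)	/* illustrative name */
	{
		struct extent_tree *et = F2FS_I(inode)->extent_tree;

		if (et)
			atomic_dec(&et->refcount);
		/*
		 * The tree and its nodes stay in the cache; only the
		 * shrinker's first pass may free them once the refcount
		 * has dropped to zero.
		 */
	}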

> b) data and node cache of inode is exist in VFS, and its tree and nodes in extent
> cache is exist too.

We know that this is a separate issue, since there is no code to check
whether data and node caches exist along with extent cache entries.
And, I don't think we should eliminate such duplication, since the extent
cache is a supplementary subset of the data and node caches.

> 
> > 
> > >
> > > And this step releasing breaks the rule of lru runs.
> > >
> > > Some unreferenced file has high hit ratio and some referenced file may
> > > have low hit ratio. Why not release low hit ratio extent tree at first?
> > 
> > But, still user has opened the referenced file to further access, right?
> 
> It depends on access model.
> 
> What I mean is that if extent cache of one inode can be hit for many times,
> we can assume the access model is re-accessly, that means, we can expect
> this cache can be hit more times. On the contrary, we can release it if it
> is necessary.

Yes, exactly it depends on user workloads.

As a counterexample,
1. thread A wrote extents and kept the file open to use later,
2. thread B newly wrote many extents and never touched them again.

After #2, if the shrinker is activated, the extents cached by thread A would
be evicted, resulting in cache misses on thread A's further accesses.

IMO, this can happen when a bunch of data blocks is written once without
updates, while some opened library/database files will access their data sooner
or later.

Thanks,

> 
> Thanks,
> 
> > And, the unreferenced file was evicted by VFS.
> > 
> > Thanks,
> > 

* RE: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-06 22:16         ` Jaegeuk Kim
@ 2015-07-08  9:41           ` Chao Yu
  2015-07-10 23:54             ` Jaegeuk Kim
  0 siblings, 1 reply; 10+ messages in thread
From: Chao Yu @ 2015-07-08  9:41 UTC (permalink / raw)
  To: 'Jaegeuk Kim'; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> Sent: Tuesday, July 07, 2015 6:16 AM
> To: Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org;
> linux-f2fs-devel@lists.sourceforge.net
> Subject: Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> 
> On Mon, Jul 06, 2015 at 08:25:23PM +0800, Chao Yu wrote:
> > > -----Original Message-----
> > > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > > Sent: Saturday, July 04, 2015 2:29 PM
> > > To: Chao Yu
> > > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org;
> > > linux-f2fs-devel@lists.sourceforge.net
> > > Subject: Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > >
> > > On Thu, Jul 02, 2015 at 08:39:43PM +0800, Chao Yu wrote:
> > > > > -----Original Message-----
> > > > > From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> > > > > Sent: Wednesday, July 01, 2015 9:26 AM
> > > > > To: linux-kernel@vger.kernel.org; linux-fsdevel@vger.kernel.org;
> > > > > linux-f2fs-devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> > > > > linux-fsdevel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> > > > > Cc: Jaegeuk Kim; Jaegeuk Kim
> > > > > Subject: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> > > > >
> > > > > If an extent_tree entry has a zero reference count, we can drop it from the
> > > > > cache in higher priority rather than currently referencing entries.
> > > > >
> > > > > Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> > > > > ---
> > > > >  fs/f2fs/data.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
> > > > >  1 file changed, 41 insertions(+), 10 deletions(-)
> > > > >
> > > > > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > > > > index 8f059e0..a0a0e2b 100644
> > > > > --- a/fs/f2fs/data.c
> > > > > +++ b/fs/f2fs/data.c
> > > > > @@ -661,21 +661,54 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi,
> int
> > > > > nr_shrink)
> > > > >  	struct radix_tree_root *root = &sbi->extent_tree_root;
> > > > >  	unsigned int found;
> > > > >  	unsigned int node_cnt = 0, tree_cnt = 0;
> > > > > +	int remained;
> > > > >
> > > > >  	if (!test_opt(sbi, EXTENT_CACHE))
> > > > >  		return 0;
> > > > >
> > > > > +	if (!down_write_trylock(&sbi->extent_tree_lock))
> > > > > +		goto out;
> > > > > +
> > > > > +	/* 1. remove unreferenced extent tree */
> > > >
> > > > We always release extent node and tree from inode with lowest ino,
> > > > why not random ino?
> > >
> > > Here what I want to do is to
> > > 1. drop unreferenced inodes' extent trees;
> > >    the inodes were already evicted before,
> > > 2. drop LRU ordered extent nodes.
> >
> > Actually, I got it, you codes show this method very clearly. :)
> >
> > >
> > > You're right. We don't need to drop it having the lowest ino first.
> > > Actually, I was supposed to add an LRU list for extent_trees.
> > > But, do we need to really take care of its order for already evicted inodes?
> > >
> > > Here, we should think about two types of hit ratios.
> > > One is for inodes, and the other is for data.
> > > The VFS maintains inode_cache in an LRU order, while its data is cached via
> > > page cache also conrolled by LRU. And, whenever inode is evicted, VFS drops
> > > all the cached data.
> > > So, I believe we should give a higher priority to inodes rather than data.
> > >
> > > And, in order to increase the hit ratio, we're trying to keep an extent tree
> > > and its nodes even if its corresponding inode was evicted.
> > > So, I concluded that the dropping order would not be critical especially for
> > > the already evicted inodes.
> >
> > >From perspective of shrinking memory size, I think you're completely right,
> > because we can regard extent tree and its nodes as metadata of one inode, if
> > VFS evict one inode, all its data and metadata include data in extent cache
> > should be evicted.
> >
> > But from perspective of arising hit ratio of our cache, I'm not sure this is
> > the best way.
> >
> > I guess in this method, we may encounter lower coverage area of dnode + extent
> > cache and double caches exist issue, like:
> > a) all data of inode is evicted by VFS, and its tree and nodes in extent cache
> > is evicted too, resulting lower hit raito of further access.
> 
> Well, f2fs_evict_inode does not destroy whole its extent tree and nodes right
> away, but just drops the refcount of the extent tree. So, I expect that this
> gives another chance of cache hit for further data access.

Agreed.

> Moreover, since this only matters with memory pressure, the unreferenced extent
> trees and nodes would be kept long enough beyond the normal situation.

I'm worried about the 'only matters' thing; I will investigate it if I have time.

> 
> > b) data and node cache of inode is exist in VFS, and its tree and nodes in extent
> > cache is exist too.
> 
> We know that this is a separate issue, since there is no such code to check
> whether data and node cache exist along with extent cache entries.

Well, just a thought: for multimedia objects, like a movie file, most of the
time we will just read it and there will be no further writes to it. So why not
build the extent cache for re-access, and meanwhile release the dnode pages
to save memory?

> And, I don't think we should eliminate such the duplicate, since the extent
> cache is a supplemenray subset of data and node caches.

Right.

> 
> >
> > >
> > > >
> > > > And this step releasing breaks the rule of lru runs.
> > > >
> > > > Some unreferenced file has high hit ratio and some referenced file may
> > > > have low hit ratio. Why not release low hit ratio extent tree at first?
> > >
> > > But, still user has opened the referenced file to further access, right?
> >
> > It depends on access model.
> >
> > What I mean is that if extent cache of one inode can be hit for many times,
> > we can assume the access model is re-accessly, that means, we can expect
> > this cache can be hit more times. On the contrary, we can release it if it
> > is necessary.
> 
> Yes, exactly it depends on user workloads.
> 
> As a counter example,
> 1. thread A wrote extents and remained the file as it was opened to use later,
> 2. thread B wrote many extents newly and never touched.
> 
> After #2, if shrinker was activated, the extents cached by thread A would
> be evicted, resulting in cache misses on further thread A's accesses.

I didn't understand; if thread A's file is kept open for reuse, then from a
long-term view it will have a higher hit ratio in its extent cache than thread
B's. Why wouldn't thread B's extent cache be evicted first?

> 
> IMO, this can happen when a bunch of data blocks were written without updates,
> while some opened library/database files will access the data sooner or later.

You mean a file that is written only once in its life, like a library file or
a multimedia file?

So I guess what you mean is that some apps keep a file open for a long time and
will access it again sooner or later, so at the very least we should keep such
referenced extent caches rather than evicting them before the unreferenced ones.

Thanks,

> 
> Thanks,
> 
> >
> > Thanks,
> >
> > > And, the unreferenced file was evicted by VFS.
> > >
> > > Thanks,
> > >

* Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-08  9:41           ` Chao Yu
@ 2015-07-10 23:54             ` Jaegeuk Kim
  2015-07-11  1:53               ` Chao Yu
  0 siblings, 1 reply; 10+ messages in thread
From: Jaegeuk Kim @ 2015-07-10 23:54 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel

On Wed, Jul 08, 2015 at 05:41:57PM +0800, Chao Yu wrote:

[snip]

> > > > You're right. We don't need to drop it having the lowest ino first.
> > > > Actually, I was supposed to add an LRU list for extent_trees.
> > > > But, do we need to really take care of its order for already evicted inodes?
> > > >
> > > > Here, we should think about two types of hit ratios.
> > > > One is for inodes, and the other is for data.
> > > > The VFS maintains inode_cache in an LRU order, while its data is cached via
> > > > page cache also conrolled by LRU. And, whenever inode is evicted, VFS drops
> > > > all the cached data.
> > > > So, I believe we should give a higher priority to inodes rather than data.
> > > >
> > > > And, in order to increase the hit ratio, we're trying to keep an extent tree
> > > > and its nodes even if its corresponding inode was evicted.
> > > > So, I concluded that the dropping order would not be critical especially for
> > > > the already evicted inodes.
> > >
> > > >From perspective of shrinking memory size, I think you're completely right,
> > > because we can regard extent tree and its nodes as metadata of one inode, if
> > > VFS evict one inode, all its data and metadata include data in extent cache
> > > should be evicted.
> > >
> > > But from perspective of arising hit ratio of our cache, I'm not sure this is
> > > the best way.
> > >
> > > I guess in this method, we may encounter lower coverage area of dnode + extent
> > > cache and double caches exist issue, like:
> > > a) all data of inode is evicted by VFS, and its tree and nodes in extent cache
> > > is evicted too, resulting lower hit raito of further access.
> > 
> > Well, f2fs_evict_inode does not destroy whole its extent tree and nodes right
> > away, but just drops the refcount of the extent tree. So, I expect that this
> > gives another chance of cache hit for further data access.
> 
> Agreed.
> 
> > Moreover, since this only matters with memory pressure, the unreferenced extent
> > trees and nodes would be kept long enough beyond the normal situation.
> 
> I'm worry about the 'only matters' thing, I will investigate it if I have time.
> 
> > 
> > > b) data and node cache of inode is exist in VFS, and its tree and nodes in extent
> > > cache is exist too.
> > 
> > We know that this is a separate issue, since there is no such code to check
> > whether data and node cache exist along with extent cache entries.
> 
> Well, just thought, for multimedia objects, like a movie file, most time
> we will just read it, there will no further writes in it. So why not
> building extent cache for reaccessing, and meanwhile releasing dnode pages
> for saving memory?

Hmm, my basic approach is to let mm reclaim caches in an LRU manner as much
as possible.
Of course, we can think about many specific conditions, but IMO, they are not
general enough to design for.
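
For context, "letting mm reclaim caches" usually means hooking the cache into
the kernel's generic shrinker interface, roughly as below. This is only a
sketch: the callback names and accounting helpers are made up, and it may not
match how f2fs is actually wired up.

	static unsigned long extent_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
	{
		/* report how many cached extent objects are reclaimable */
		return count_reclaimable_extents();	/* placeholder helper */
	}

	static unsigned long extent_cache_scan(struct shrinker *shrink,
						struct shrink_control *sc)
	{
		/* mm asks us to drop sc->nr_to_scan objects; reuse the
		 * two-pass f2fs_shrink_extent_tree() logic per superblock */
		return shrink_extents_on_all_sbs(sc->nr_to_scan);	/* placeholder helper */
	}

	static struct shrinker extent_cache_shrinker = {
		.count_objects	= extent_cache_count,
		.scan_objects	= extent_cache_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* at module init / mount time */
	register_shrinker(&extent_cache_shrinker);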

> 
> > And, I don't think we should eliminate such the duplicate, since the extent
> > cache is a supplemenray subset of data and node caches.
> 
> Right.
> 
> > 
> > >
> > > >
> > > > >
> > > > > And this step releasing breaks the rule of lru runs.
> > > > >
> > > > > Some unreferenced file has high hit ratio and some referenced file may
> > > > > have low hit ratio. Why not release low hit ratio extent tree at first?
> > > >
> > > > But, still user has opened the referenced file to further access, right?
> > >
> > > It depends on access model.
> > >
> > > What I mean is that if extent cache of one inode can be hit for many times,
> > > we can assume the access model is re-accessly, that means, we can expect
> > > this cache can be hit more times. On the contrary, we can release it if it
> > > is necessary.
> > 
> > Yes, exactly it depends on user workloads.
> > 
> > As a counter example,
> > 1. thread A wrote extents and remained the file as it was opened to use later,
> > 2. thread B wrote many extents newly and never touched.
> > 
> > After #2, if shrinker was activated, the extents cached by thread A would
> > be evicted, resulting in cache misses on further thread A's accesses.
> 
> I didn't understand, if thread A's file is opened for reusing, from long-term
> view, it will have high hit ratio in its extent cache than thread B's, Why
> thread B's extent cache is not be evicted firstly.

Not in the long-term view. Think of #1 -> #2 -> shrink -> #1 -> ...

> 
> > 
> > IMO, this can happen when a bunch of data blocks were written without updates,
> > while some opened library/database files will access the data sooner or later.
> 
> You mean the file with one time written in its life, like lib file or multimedia
> file?
> 
> So I guess what you mean is that, some app keeps file opened for long time, and
> will access it again sooner or later, at least we should keep these referenced
> extent cache visible rather than evicting them before evicting unreferenced one's.

Something like that. :)

Thanks,


* RE: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
  2015-07-10 23:54             ` Jaegeuk Kim
@ 2015-07-11  1:53               ` Chao Yu
  0 siblings, 0 replies; 10+ messages in thread
From: Chao Yu @ 2015-07-11  1:53 UTC (permalink / raw)
  To: 'Jaegeuk Kim'; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk@kernel.org]
> Sent: Saturday, July 11, 2015 7:54 AM
> To: Chao Yu; Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org;
> linux-f2fs-devel@lists.sourceforge.net; linux-fsdevel@vger.kernel.org;
> linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net
> Subject: Re: [f2fs-dev] [PATCH 2/2] f2fs: shrink unreferenced extent_caches first
> 
> On Wed, Jul 08, 2015 at 05:41:57PM +0800, Chao Yu wrote:
> 
> [snip]
> 
> > > > > You're right. We don't need to drop it having the lowest ino first.
> > > > > Actually, I was supposed to add an LRU list for extent_trees.
> > > > > But, do we need to really take care of its order for already evicted inodes?
> > > > >
> > > > > Here, we should think about two types of hit ratios.
> > > > > One is for inodes, and the other is for data.
> > > > > The VFS maintains inode_cache in an LRU order, while its data is cached via
> > > > > page cache also conrolled by LRU. And, whenever inode is evicted, VFS drops
> > > > > all the cached data.
> > > > > So, I believe we should give a higher priority to inodes rather than data.
> > > > >
> > > > > And, in order to increase the hit ratio, we're trying to keep an extent tree
> > > > > and its nodes even if its corresponding inode was evicted.
> > > > > So, I concluded that the dropping order would not be critical especially for
> > > > > the already evicted inodes.
> > > >
> > > > >From perspective of shrinking memory size, I think you're completely right,
> > > > because we can regard extent tree and its nodes as metadata of one inode, if
> > > > VFS evict one inode, all its data and metadata include data in extent cache
> > > > should be evicted.
> > > >
> > > > But from perspective of arising hit ratio of our cache, I'm not sure this is
> > > > the best way.
> > > >
> > > > I guess in this method, we may encounter lower coverage area of dnode + extent
> > > > cache and double caches exist issue, like:
> > > > a) all data of inode is evicted by VFS, and its tree and nodes in extent cache
> > > > is evicted too, resulting lower hit raito of further access.
> > >
> > > Well, f2fs_evict_inode does not destroy whole its extent tree and nodes right
> > > away, but just drops the refcount of the extent tree. So, I expect that this
> > > gives another chance of cache hit for further data access.
> >
> > Agreed.
> >
> > > Moreover, since this only matters with memory pressure, the unreferenced extent
> > > trees and nodes would be kept long enough beyond the normal situation.
> >
> > I'm worry about the 'only matters' thing, I will investigate it if I have time.
> >
> > >
> > > > b) data and node cache of inode is exist in VFS, and its tree and nodes in extent
> > > > cache is exist too.
> > >
> > > We know that this is a separate issue, since there is no such code to check
> > > whether data and node cache exist along with extent cache entries.
> >
> > Well, just thought, for multimedia objects, like a movie file, most time
> > we will just read it, there will no further writes in it. So why not
> > building extent cache for reaccessing, and meanwhile releasing dnode pages
> > for saving memory?
> 
> Hmm, my basic approach is letting mm reclaim caches in an LRU manner as much
> as possible.
> Of course, we can think about many specific conditions, but IMO, it is not
> enough to treat them as general ones.

It's just a thought that flashed through my mind; I hope it can help even a
little with memory saving in f2fs. Anyway, I agree with you; let's stabilize
and enhance 'these general ones'.

> 
> >
> > > And, I don't think we should eliminate such the duplicate, since the extent
> > > cache is a supplemenray subset of data and node caches.
> >
> > Right.
> >
> > >
> > > >
> > > > >
> > > > > >
> > > > > > And this step releasing breaks the rule of lru runs.
> > > > > >
> > > > > > Some unreferenced file has high hit ratio and some referenced file may
> > > > > > have low hit ratio. Why not release low hit ratio extent tree at first?
> > > > >
> > > > > But, still user has opened the referenced file to further access, right?
> > > >
> > > > It depends on access model.
> > > >
> > > > What I mean is that if extent cache of one inode can be hit for many times,
> > > > we can assume the access model is re-accessly, that means, we can expect
> > > > this cache can be hit more times. On the contrary, we can release it if it
> > > > is necessary.
> > >
> > > Yes, exactly it depends on user workloads.
> > >
> > > As a counter example,
> > > 1. thread A wrote extents and remained the file as it was opened to use later,
> > > 2. thread B wrote many extents newly and never touched.
> > >
> > > After #2, if shrinker was activated, the extents cached by thread A would
> > > be evicted, resulting in cache misses on further thread A's accesses.
> >
> > I didn't understand, if thread A's file is opened for reusing, from long-term
> > view, it will have high hit ratio in its extent cache than thread B's, Why
> > thread B's extent cache is not be evicted firstly.
> 
> Not long-term view. Like #1 -> #2 -> shrink -> #1 -> ...

That's reasonable; thanks for your explanation. :)

> 
> >
> > >
> > > IMO, this can happen when a bunch of data blocks were written without updates,
> > > while some opened library/database files will access the data sooner or later.
> >
> > You mean the file with one time written in its life, like lib file or multimedia
> > file?
> >
> > So I guess what you mean is that, some app keeps file opened for long time, and
> > will access it again sooner or later, at least we should keep these referenced
> > extent cache visible rather than evicting them before evicting unreferenced one's.
> 
> Something like that. :)

OK, I understand completely, so for now let's do it your way.
If I have any further thoughts, I will discuss them with you. :)

Reviewed-by: Chao Yu <chao2.yu@samsung.com>

> 
> Thanks,
> 
