* [PATCH] f2fs: extract rb-tree operation infrastructure
@ 2017-04-07 10:29 Chao Yu
  2017-04-11  0:05 ` Jaegeuk Kim
From: Chao Yu @ 2017-04-07 10:29 UTC
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

rb-tree lookup/update functions are deeply coupled into the extent cache
code, which makes it hard to reuse these basic functions. This patch
extracts a common rb-tree operation infrastructure for later reuse.
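
To illustrate the intended reuse, a caller only needs to lay out its own
structure so that the leading fields match struct rb_entry. The sketch below
is illustrative and not part of the change itself: the sample_entry type, its
private field and sample_lookup() are hypothetical, and it assumes the shared
helpers are visible to the caller.

struct sample_entry {
	struct rb_node rb_node;		/* must stay first, as in struct rb_entry */
	unsigned int ofs;		/* start offset covered by this entry */
	unsigned int len;		/* length covered by this entry */
	void *private;			/* subsystem-specific payload follows */
};

static struct sample_entry *sample_lookup(struct rb_root *root,
				struct sample_entry *cached, unsigned int ofs)
{
	/* the cast is safe only because the leading fields mirror rb_entry */
	return (struct sample_entry *)__lookup_rb_tree(root,
				(struct rb_entry *)cached, ofs);
}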

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/extent_cache.c | 293 ++++++++++++++++++++++++++++---------------------
 fs/f2fs/f2fs.h         |  20 +++-
 2 files changed, 182 insertions(+), 131 deletions(-)

diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index c6934f014e0f..1a353f8c01d9 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -18,6 +18,147 @@
 #include "node.h"
 #include <trace/events/f2fs.h>
 
+static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+							unsigned int ofs)
+{
+	if (cached_re) {
+		if (cached_re->ofs <= ofs &&
+				cached_re->ofs + cached_re->len > ofs) {
+			return cached_re;
+		}
+	}
+
+	return NULL;
+}
+
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+							unsigned int ofs)
+{
+	struct rb_node *node = root->rb_node;
+	struct rb_entry *re;
+
+	while (node) {
+		re = rb_entry(node, struct rb_entry, rb_node);
+
+		if (ofs < re->ofs)
+			node = node->rb_left;
+		else if (ofs >= re->ofs + re->len)
+			node = node->rb_right;
+		else
+			return re;
+	}
+	return NULL;
+}
+
+static struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+				struct rb_entry *cached_re, unsigned int ofs)
+{
+	struct rb_entry *re;
+
+	re = __lookup_rb_tree_fast(cached_re, ofs);
+	if (!re)
+		return __lookup_rb_tree_slow(root, ofs);
+
+	return re;
+}
+
+static struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+				struct rb_root *root, struct rb_node **parent,
+				unsigned int ofs)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_entry *re;
+
+	while (*p) {
+		*parent = *p;
+		re = rb_entry(*parent, struct rb_entry, rb_node);
+
+		if (ofs < re->ofs)
+			p = &(*p)->rb_left;
+		else if (ofs >= re->ofs + re->len)
+			p = &(*p)->rb_right;
+		else
+			f2fs_bug_on(sbi, 1);
+	}
+
+	return p;
+}
+
+/*
+ * Look up the rb entry at position @ofs in the rb-tree;
+ * if hit, return the entry, otherwise return NULL and set
+ * @prev_entry: entry before @ofs
+ * @next_entry: entry after @ofs
+ * @insert_p/@insert_parent: insert point for a new entry at @ofs
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+static struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+				struct rb_entry *cached_re,
+				unsigned int ofs,
+				struct rb_entry **prev_entry,
+				struct rb_entry **next_entry,
+				struct rb_node ***insert_p,
+				struct rb_node **insert_parent)
+{
+	struct rb_node **pnode = &root->rb_node;
+	struct rb_node *parent = NULL, *tmp_node;
+	struct rb_entry *re = cached_re;
+
+	*insert_p = NULL;
+	*insert_parent = NULL;
+	*prev_entry = NULL;
+	*next_entry = NULL;
+
+	if (RB_EMPTY_ROOT(root))
+		return NULL;
+
+	if (re) {
+		if (re->ofs <= ofs && re->ofs + re->len > ofs)
+			goto lookup_neighbors;
+	}
+
+	while (*pnode) {
+		parent = *pnode;
+		re = rb_entry(*pnode, struct rb_entry, rb_node);
+
+		if (ofs < re->ofs)
+			pnode = &(*pnode)->rb_left;
+		else if (ofs >= re->ofs + re->len)
+			pnode = &(*pnode)->rb_right;
+		else
+			goto lookup_neighbors;
+	}
+
+	*insert_p = pnode;
+	*insert_parent = parent;
+
+	re = rb_entry(parent, struct rb_entry, rb_node);
+	tmp_node = parent;
+	if (parent && ofs > re->ofs)
+		tmp_node = rb_next(parent);
+	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+
+	tmp_node = parent;
+	if (parent && ofs < re->ofs)
+		tmp_node = rb_prev(parent);
+	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+	return NULL;
+
+lookup_neighbors:
+	if (ofs == re->ofs) {
+		/* lookup prev node for merging backward later */
+		tmp_node = rb_prev(&re->rb_node);
+		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+	}
+	if (ofs == re->ofs + re->len - 1) {
+		/* lookup next node for merging frontward later */
+		tmp_node = rb_next(&re->rb_node);
+		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+	}
+	return re;
+}
+
 static struct kmem_cache *extent_tree_slab;
 static struct kmem_cache *extent_node_slab;
 
@@ -102,36 +243,6 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
 	return et;
 }
 
-static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, unsigned int fofs)
-{
-	struct rb_node *node = et->root.rb_node;
-	struct extent_node *en = et->cached_en;
-
-	if (en) {
-		struct extent_info *cei = &en->ei;
-
-		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
-			stat_inc_cached_node_hit(sbi);
-			return en;
-		}
-	}
-
-	while (node) {
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (fofs < en->ei.fofs) {
-			node = node->rb_left;
-		} else if (fofs >= en->ei.fofs + en->ei.len) {
-			node = node->rb_right;
-		} else {
-			stat_inc_rbtree_node_hit(sbi);
-			return en;
-		}
-	}
-	return NULL;
-}
-
 static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei)
 {
@@ -237,17 +348,27 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 		goto out;
 	}
 
-	en = __lookup_extent_tree(sbi, et, pgofs);
+	en = (struct extent_node *)__lookup_rb_tree_fast(
+				(struct rb_entry *)et->cached_en, pgofs);
 	if (en) {
-		*ei = en->ei;
-		spin_lock(&sbi->extent_lock);
-		if (!list_empty(&en->list)) {
-			list_move_tail(&en->list, &sbi->extent_list);
-			et->cached_en = en;
-		}
-		spin_unlock(&sbi->extent_lock);
-		ret = true;
+		stat_inc_cached_node_hit(sbi);
+	} else {
+		en = (struct extent_node *)__lookup_rb_tree_slow(&et->root,
+								pgofs);
+		if (en)
+			stat_inc_rbtree_node_hit(sbi);
+		else
+			goto out;
 	}
+
+	*ei = en->ei;
+	spin_lock(&sbi->extent_lock);
+	if (!list_empty(&en->list)) {
+		list_move_tail(&en->list, &sbi->extent_list);
+		et->cached_en = en;
+	}
+	spin_unlock(&sbi->extent_lock);
+	ret = true;
 out:
 	stat_inc_total_hit(sbi);
 	read_unlock(&et->lock);
@@ -256,83 +377,6 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 	return ret;
 }
 
-
-/*
- * lookup extent at @fofs, if hit, return the extent
- * if not, return NULL and
- * @prev_ex: extent before fofs
- * @next_ex: extent after fofs
- * @insert_p: insert point for new extent at fofs
- * in order to simpfy the insertion after.
- * tree must stay unchanged between lookup and insertion.
- */
-static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
-				unsigned int fofs,
-				struct extent_node **prev_ex,
-				struct extent_node **next_ex,
-				struct rb_node ***insert_p,
-				struct rb_node **insert_parent)
-{
-	struct rb_node **pnode = &et->root.rb_node;
-	struct rb_node *parent = NULL, *tmp_node;
-	struct extent_node *en = et->cached_en;
-
-	*insert_p = NULL;
-	*insert_parent = NULL;
-	*prev_ex = NULL;
-	*next_ex = NULL;
-
-	if (RB_EMPTY_ROOT(&et->root))
-		return NULL;
-
-	if (en) {
-		struct extent_info *cei = &en->ei;
-
-		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
-			goto lookup_neighbors;
-	}
-
-	while (*pnode) {
-		parent = *pnode;
-		en = rb_entry(*pnode, struct extent_node, rb_node);
-
-		if (fofs < en->ei.fofs)
-			pnode = &(*pnode)->rb_left;
-		else if (fofs >= en->ei.fofs + en->ei.len)
-			pnode = &(*pnode)->rb_right;
-		else
-			goto lookup_neighbors;
-	}
-
-	*insert_p = pnode;
-	*insert_parent = parent;
-
-	en = rb_entry(parent, struct extent_node, rb_node);
-	tmp_node = parent;
-	if (parent && fofs > en->ei.fofs)
-		tmp_node = rb_next(parent);
-	*next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
-
-	tmp_node = parent;
-	if (parent && fofs < en->ei.fofs)
-		tmp_node = rb_prev(parent);
-	*prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
-	return NULL;
-
-lookup_neighbors:
-	if (fofs == en->ei.fofs) {
-		/* lookup prev node for merging backward later */
-		tmp_node = rb_prev(&en->rb_node);
-		*prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
-	}
-	if (fofs == en->ei.fofs + en->ei.len - 1) {
-		/* lookup next node for merging frontward later */
-		tmp_node = rb_next(&en->rb_node);
-		*next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
-	}
-	return en;
-}
-
 static struct extent_node *__try_merge_extent_node(struct inode *inode,
 				struct extent_tree *et, struct extent_info *ei,
 				struct extent_node *prev_ex,
@@ -387,17 +431,7 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
 		goto do_insert;
 	}
 
-	while (*p) {
-		parent = *p;
-		en = rb_entry(parent, struct extent_node, rb_node);
-
-		if (ei->fofs < en->ei.fofs)
-			p = &(*p)->rb_left;
-		else if (ei->fofs >= en->ei.fofs + en->ei.len)
-			p = &(*p)->rb_right;
-		else
-			f2fs_bug_on(sbi, 1);
-	}
+	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
 do_insert:
 	en = __attach_extent_node(sbi, et, ei, parent, p);
 	if (!en)
@@ -447,7 +481,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 	__drop_largest_extent(inode, fofs, len);
 
 	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
-	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
+	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
+					(struct rb_entry *)et->cached_en, fofs,
+					(struct rb_entry **)&prev_en,
+					(struct rb_entry **)&next_en,
 					&insert_p, &insert_parent);
 	if (!en)
 		en = next_en;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5a2b8cd13c92..407a21a09fb7 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -380,16 +380,30 @@ enum {
 /* number of extent info in extent cache we try to shrink */
 #define EXTENT_CACHE_SHRINK_NUMBER	128
 
+struct rb_entry {
+	struct rb_node rb_node;		/* rb node located in rb-tree */
+	unsigned int ofs;		/* start offset of the entry */
+	unsigned int len;		/* length of the entry */
+};
+
 struct extent_info {
 	unsigned int fofs;		/* start offset in a file */
-	u32 blk;			/* start block address of the extent */
 	unsigned int len;		/* length of the extent */
+	u32 blk;			/* start block address of the extent */
 };
 
 struct extent_node {
-	struct rb_node rb_node;		/* rb node located in rb-tree */
+	struct rb_node rb_node;
+	union {
+		struct {
+			unsigned int fofs;
+			unsigned int len;
+			u32 blk;
+		};
+		struct extent_info ei;	/* extent info */
+
+	};
 	struct list_head list;		/* node in global extent list of sbi */
-	struct extent_info ei;		/* extent info */
 	struct extent_tree *et;		/* extent tree pointer */
 };
 
-- 
2.12.2.510.ge1104a5ee539
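
The casts between struct extent_node and struct rb_entry above are safe only
because both structures start with an rb_node followed by an unsigned int
offset and length at matching offsets, and the anonymous union keeps fofs/len
in extent_node aliased with ei. Below is a minimal sketch of compile-time
checks that could document this assumption; check_rb_entry_layout() is
hypothetical and not part of the patch.

static inline void check_rb_entry_layout(void)
{
	/* illustrative only: layout checks backing the rb_entry casts */
	BUILD_BUG_ON(offsetof(struct rb_entry, rb_node) !=
			offsetof(struct extent_node, rb_node));
	BUILD_BUG_ON(offsetof(struct rb_entry, ofs) !=
			offsetof(struct extent_node, fofs));
	BUILD_BUG_ON(offsetof(struct rb_entry, len) !=
			offsetof(struct extent_node, len));
}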


* Re: [PATCH] f2fs: extract rb-tree operation infrastructure
  2017-04-07 10:29 [PATCH] f2fs: extract rb-tree operation infrastructure Chao Yu
@ 2017-04-11  0:05 ` Jaegeuk Kim
  2017-04-11  1:22   ` Chao Yu
From: Jaegeuk Kim @ 2017-04-11  0:05 UTC
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, chao

Hi Chao,

On 04/07, Chao Yu wrote:
> rb-tree lookup/update functions are deeply coupled into the extent cache
> code, which makes it hard to reuse these basic functions. This patch
> extracts a common rb-tree operation infrastructure for later reuse.
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/extent_cache.c | 293 ++++++++++++++++++++++++++++---------------------
>  fs/f2fs/f2fs.h         |  20 +++-
>  2 files changed, 182 insertions(+), 131 deletions(-)
> 
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index c6934f014e0f..1a353f8c01d9 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -18,6 +18,147 @@
>  #include "node.h"
>  #include <trace/events/f2fs.h>
>  
> +static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
> +							unsigned int ofs)
> +{
> +	if (cached_re) {
> +		if (cached_re->ofs <= ofs &&
> +				cached_re->ofs + cached_re->len > ofs) {
> +			return cached_re;
> +		}
> +	}
> +
> +	return NULL;
> +}
> +
> +static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
> +							unsigned int ofs)
> +{
> +	struct rb_node *node = root->rb_node;
> +	struct rb_entry *re;
> +
> +	while (node) {
> +		re = rb_entry(node, struct rb_entry, rb_node);
> +
> +		if (ofs < re->ofs)
> +			node = node->rb_left;
> +		else if (ofs >= re->ofs + re->len)
> +			node = node->rb_right;
> +		else
> +			return re;
> +	}
> +	return NULL;
> +}
> +
> +static struct rb_entry *__lookup_rb_tree(struct rb_root *root,
> +				struct rb_entry *cached_re, unsigned int ofs)
> +{
> +	struct rb_entry *re;
> +
> +	re = __lookup_rb_tree_fast(cached_re, ofs);
> +	if (!re)
> +		return __lookup_rb_tree_slow(root, ofs);
> +
> +	return re;
> +}
> +
> +static struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
> +				struct rb_root *root, struct rb_node **parent,
> +				unsigned int ofs)
> +{
> +	struct rb_node **p = &root->rb_node;
> +	struct rb_entry *re;
> +
> +	while (*p) {
> +		*parent = *p;
> +		re = rb_entry(*parent, struct rb_entry, rb_node);
> +
> +		if (ofs < re->ofs)
> +			p = &(*p)->rb_left;
> +		else if (ofs >= re->ofs + re->len)
> +			p = &(*p)->rb_right;
> +		else
> +			f2fs_bug_on(sbi, 1);
> +	}
> +
> +	return p;
> +}
> +
> +/*
> + * Look up the rb entry at position @ofs in the rb-tree;
> + * if hit, return the entry, otherwise return NULL and set
> + * @prev_entry: entry before @ofs
> + * @next_entry: entry after @ofs
> + * @insert_p/@insert_parent: insert point for a new entry at @ofs
> + * in order to simplify the insertion afterwards.
> + * The tree must stay unchanged between lookup and insertion.
> + */
> +static struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
> +				struct rb_entry *cached_re,
> +				unsigned int ofs,
> +				struct rb_entry **prev_entry,
> +				struct rb_entry **next_entry,
> +				struct rb_node ***insert_p,
> +				struct rb_node **insert_parent)
> +{
> +	struct rb_node **pnode = &root->rb_node;
> +	struct rb_node *parent = NULL, *tmp_node;
> +	struct rb_entry *re = cached_re;
> +
> +	*insert_p = NULL;
> +	*insert_parent = NULL;
> +	*prev_entry = NULL;
> +	*next_entry = NULL;
> +
> +	if (RB_EMPTY_ROOT(root))
> +		return NULL;
> +
> +	if (re) {
> +		if (re->ofs <= ofs && re->ofs + re->len > ofs)
> +			goto lookup_neighbors;
> +	}
> +
> +	while (*pnode) {
> +		parent = *pnode;
> +		re = rb_entry(*pnode, struct rb_entry, rb_node);
> +
> +		if (ofs < re->ofs)
> +			pnode = &(*pnode)->rb_left;
> +		else if (ofs >= re->ofs + re->len)
> +			pnode = &(*pnode)->rb_right;
> +		else
> +			goto lookup_neighbors;
> +	}
> +
> +	*insert_p = pnode;
> +	*insert_parent = parent;
> +
> +	re = rb_entry(parent, struct rb_entry, rb_node);
> +	tmp_node = parent;
> +	if (parent && ofs > re->ofs)
> +		tmp_node = rb_next(parent);
> +	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
> +
> +	tmp_node = parent;
> +	if (parent && ofs < re->ofs)
> +		tmp_node = rb_prev(parent);
> +	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
> +	return NULL;
> +
> +lookup_neighbors:
> +	if (ofs == re->ofs) {
> +		/* lookup prev node for merging backward later */
> +		tmp_node = rb_prev(&re->rb_node);
> +		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
> +	}
> +	if (ofs == re->ofs + re->len - 1) {
> +		/* lookup next node for merging frontward later */
> +		tmp_node = rb_next(&re->rb_node);
> +		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
> +	}
> +	return re;
> +}
> +
>  static struct kmem_cache *extent_tree_slab;
>  static struct kmem_cache *extent_node_slab;
>  
> @@ -102,36 +243,6 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
>  	return et;
>  }
>  
> -static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
> -				struct extent_tree *et, unsigned int fofs)
> -{
> -	struct rb_node *node = et->root.rb_node;
> -	struct extent_node *en = et->cached_en;
> -
> -	if (en) {
> -		struct extent_info *cei = &en->ei;
> -
> -		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
> -			stat_inc_cached_node_hit(sbi);
> -			return en;
> -		}
> -	}
> -
> -	while (node) {
> -		en = rb_entry(node, struct extent_node, rb_node);
> -
> -		if (fofs < en->ei.fofs) {
> -			node = node->rb_left;
> -		} else if (fofs >= en->ei.fofs + en->ei.len) {
> -			node = node->rb_right;
> -		} else {
> -			stat_inc_rbtree_node_hit(sbi);
> -			return en;
> -		}
> -	}
> -	return NULL;
> -}
> -
>  static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei)
>  {
> @@ -237,17 +348,27 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		goto out;
>  	}
>  
> -	en = __lookup_extent_tree(sbi, et, pgofs);
> +	en = (struct extent_node *)__lookup_rb_tree_fast(
> +				(struct rb_entry *)et->cached_en, pgofs);
>  	if (en) {
> -		*ei = en->ei;
> -		spin_lock(&sbi->extent_lock);
> -		if (!list_empty(&en->list)) {
> -			list_move_tail(&en->list, &sbi->extent_list);
> -			et->cached_en = en;
> -		}
> -		spin_unlock(&sbi->extent_lock);
> -		ret = true;
> +		stat_inc_cached_node_hit(sbi);
> +	} else {
> +		en = (struct extent_node *)__lookup_rb_tree_slow(&et->root,
> +								pgofs);
> +		if (en)
> +			stat_inc_rbtree_node_hit(sbi);
> +		else
> +			goto out;
>  	}

Could we use __lookup_rb_tree() like this instead?

---
 fs/f2fs/extent_cache.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 1a353f8c01d9..9b86d549a829 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -27,7 +27,6 @@ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
 			return cached_re;
 		}
 	}
-
 	return NULL;
 }
 
@@ -348,18 +347,15 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 		goto out;
 	}
 
-	en = (struct extent_node *)__lookup_rb_tree_fast(
+	en = (struct extent_node *)__lookup_rb_tree(&et->root,
 				(struct rb_entry *)et->cached_en, pgofs);
-	if (en) {
+	if (!en)
+		goto out;
+
+	if (en == et->cached_en)
 		stat_inc_cached_node_hit(sbi);
-	} else {
-		en = (struct extent_node *)__lookup_rb_tree_slow(&et->root,
-								pgofs);
-		if (en)
-			stat_inc_rbtree_node_hit(sbi);
-		else
-			goto out;
-	}
+	else
+		stat_inc_rbtree_node_hit(sbi);
 
 	*ei = en->ei;
 	spin_lock(&sbi->extent_lock);
-- 
2.11.0
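
The pointer comparison above works because __lookup_rb_tree_fast() returns the
cached entry object itself on a hit. A minimal sketch of that control flow
(example_hit_is_cached() is a hypothetical helper, not part of either diff):

static bool example_hit_is_cached(struct extent_tree *et, unsigned int pgofs,
					struct extent_node **out)
{
	struct extent_node *en;

	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	*out = en;
	/* a cached hit hands back et->cached_en itself, so pointer equality
	 * is enough to attribute the hit to the right counter */
	return en && en == et->cached_en;
}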


> +
> +	*ei = en->ei;
> +	spin_lock(&sbi->extent_lock);
> +	if (!list_empty(&en->list)) {
> +		list_move_tail(&en->list, &sbi->extent_list);
> +		et->cached_en = en;
> +	}
> +	spin_unlock(&sbi->extent_lock);
> +	ret = true;
>  out:
>  	stat_inc_total_hit(sbi);
>  	read_unlock(&et->lock);
> @@ -256,83 +377,6 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  	return ret;
>  }
>  
> -
> -/*
> - * lookup extent at @fofs, if hit, return the extent
> - * if not, return NULL and
> - * @prev_ex: extent before fofs
> - * @next_ex: extent after fofs
> - * @insert_p: insert point for new extent at fofs
> - * in order to simpfy the insertion after.
> - * tree must stay unchanged between lookup and insertion.
> - */
> -static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
> -				unsigned int fofs,
> -				struct extent_node **prev_ex,
> -				struct extent_node **next_ex,
> -				struct rb_node ***insert_p,
> -				struct rb_node **insert_parent)
> -{
> -	struct rb_node **pnode = &et->root.rb_node;
> -	struct rb_node *parent = NULL, *tmp_node;
> -	struct extent_node *en = et->cached_en;
> -
> -	*insert_p = NULL;
> -	*insert_parent = NULL;
> -	*prev_ex = NULL;
> -	*next_ex = NULL;
> -
> -	if (RB_EMPTY_ROOT(&et->root))
> -		return NULL;
> -
> -	if (en) {
> -		struct extent_info *cei = &en->ei;
> -
> -		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
> -			goto lookup_neighbors;
> -	}
> -
> -	while (*pnode) {
> -		parent = *pnode;
> -		en = rb_entry(*pnode, struct extent_node, rb_node);
> -
> -		if (fofs < en->ei.fofs)
> -			pnode = &(*pnode)->rb_left;
> -		else if (fofs >= en->ei.fofs + en->ei.len)
> -			pnode = &(*pnode)->rb_right;
> -		else
> -			goto lookup_neighbors;
> -	}
> -
> -	*insert_p = pnode;
> -	*insert_parent = parent;
> -
> -	en = rb_entry(parent, struct extent_node, rb_node);
> -	tmp_node = parent;
> -	if (parent && fofs > en->ei.fofs)
> -		tmp_node = rb_next(parent);
> -	*next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
> -
> -	tmp_node = parent;
> -	if (parent && fofs < en->ei.fofs)
> -		tmp_node = rb_prev(parent);
> -	*prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
> -	return NULL;
> -
> -lookup_neighbors:
> -	if (fofs == en->ei.fofs) {
> -		/* lookup prev node for merging backward later */
> -		tmp_node = rb_prev(&en->rb_node);
> -		*prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
> -	}
> -	if (fofs == en->ei.fofs + en->ei.len - 1) {
> -		/* lookup next node for merging frontward later */
> -		tmp_node = rb_next(&en->rb_node);
> -		*next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
> -	}
> -	return en;
> -}
> -
>  static struct extent_node *__try_merge_extent_node(struct inode *inode,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct extent_node *prev_ex,
> @@ -387,17 +431,7 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
>  		goto do_insert;
>  	}
>  
> -	while (*p) {
> -		parent = *p;
> -		en = rb_entry(parent, struct extent_node, rb_node);
> -
> -		if (ei->fofs < en->ei.fofs)
> -			p = &(*p)->rb_left;
> -		else if (ei->fofs >= en->ei.fofs + en->ei.len)
> -			p = &(*p)->rb_right;
> -		else
> -			f2fs_bug_on(sbi, 1);
> -	}
> +	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
>  do_insert:
>  	en = __attach_extent_node(sbi, et, ei, parent, p);
>  	if (!en)
> @@ -447,7 +481,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  	__drop_largest_extent(inode, fofs, len);
>  
>  	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
> -	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
> +	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
> +					(struct rb_entry *)et->cached_en, fofs,
> +					(struct rb_entry **)&prev_en,
> +					(struct rb_entry **)&next_en,
>  					&insert_p, &insert_parent);
>  	if (!en)
>  		en = next_en;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 5a2b8cd13c92..407a21a09fb7 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -380,16 +380,30 @@ enum {
>  /* number of extent info in extent cache we try to shrink */
>  #define EXTENT_CACHE_SHRINK_NUMBER	128
>  
> +struct rb_entry {
> +	struct rb_node rb_node;		/* rb node located in rb-tree */
> +	unsigned int ofs;		/* start offset of the entry */
> +	unsigned int len;		/* length of the entry */
> +};
> +
>  struct extent_info {
>  	unsigned int fofs;		/* start offset in a file */
> -	u32 blk;			/* start block address of the extent */
>  	unsigned int len;		/* length of the extent */
> +	u32 blk;			/* start block address of the extent */
>  };
>  
>  struct extent_node {
> -	struct rb_node rb_node;		/* rb node located in rb-tree */
> +	struct rb_node rb_node;
> +	union {
> +		struct {
> +			unsigned int fofs;
> +			unsigned int len;
> +			u32 blk;
> +		};
> +		struct extent_info ei;	/* extent info */
> +
> +	};
>  	struct list_head list;		/* node in global extent list of sbi */
> -	struct extent_info ei;		/* extent info */
>  	struct extent_tree *et;		/* extent tree pointer */
>  };
>  
> -- 
> 2.12.2.510.ge1104a5ee539


* Re: [PATCH] f2fs: extract rb-tree operation infrastructure
  2017-04-11  0:05 ` Jaegeuk Kim
@ 2017-04-11  1:22   ` Chao Yu
From: Chao Yu @ 2017-04-11  1:22 UTC
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, chao

On 2017/4/11 8:05, Jaegeuk Kim wrote:
> Hi Chao,
> 
> On 04/07, Chao Yu wrote:
>> rb-tree lookup/update functions are deeply coupled into the extent cache
>> code, which makes it hard to reuse these basic functions. This patch
>> extracts a common rb-tree operation infrastructure for later reuse.
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  fs/f2fs/extent_cache.c | 293 ++++++++++++++++++++++++++++---------------------
>>  fs/f2fs/f2fs.h         |  20 +++-
>>  2 files changed, 182 insertions(+), 131 deletions(-)
>>
>> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
>> index c6934f014e0f..1a353f8c01d9 100644
>> --- a/fs/f2fs/extent_cache.c
>> +++ b/fs/f2fs/extent_cache.c
>> @@ -18,6 +18,147 @@
>>  #include "node.h"
>>  #include <trace/events/f2fs.h>
>>  
>> +static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
>> +							unsigned int ofs)
>> +{
>> +	if (cached_re) {
>> +		if (cached_re->ofs <= ofs &&
>> +				cached_re->ofs + cached_re->len > ofs) {
>> +			return cached_re;
>> +		}
>> +	}
>> +
>> +	return NULL;
>> +}
>> +
>> +static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
>> +							unsigned int ofs)
>> +{
>> +	struct rb_node *node = root->rb_node;
>> +	struct rb_entry *re;
>> +
>> +	while (node) {
>> +		re = rb_entry(node, struct rb_entry, rb_node);
>> +
>> +		if (ofs < re->ofs)
>> +			node = node->rb_left;
>> +		else if (ofs >= re->ofs + re->len)
>> +			node = node->rb_right;
>> +		else
>> +			return re;
>> +	}
>> +	return NULL;
>> +}
>> +
>> +static struct rb_entry *__lookup_rb_tree(struct rb_root *root,
>> +				struct rb_entry *cached_re, unsigned int ofs)
>> +{
>> +	struct rb_entry *re;
>> +
>> +	re = __lookup_rb_tree_fast(cached_re, ofs);
>> +	if (!re)
>> +		return __lookup_rb_tree_slow(root, ofs);
>> +
>> +	return re;
>> +}
>> +
>> +static struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
>> +				struct rb_root *root, struct rb_node **parent,
>> +				unsigned int ofs)
>> +{
>> +	struct rb_node **p = &root->rb_node;
>> +	struct rb_entry *re;
>> +
>> +	while (*p) {
>> +		*parent = *p;
>> +		re = rb_entry(*parent, struct rb_entry, rb_node);
>> +
>> +		if (ofs < re->ofs)
>> +			p = &(*p)->rb_left;
>> +		else if (ofs >= re->ofs + re->len)
>> +			p = &(*p)->rb_right;
>> +		else
>> +			f2fs_bug_on(sbi, 1);
>> +	}
>> +
>> +	return p;
>> +}
>> +
>> +/*
>> + * Look up the rb entry at position @ofs in the rb-tree;
>> + * if hit, return the entry, otherwise return NULL and set
>> + * @prev_entry: entry before @ofs
>> + * @next_entry: entry after @ofs
>> + * @insert_p/@insert_parent: insert point for a new entry at @ofs
>> + * in order to simplify the insertion afterwards.
>> + * The tree must stay unchanged between lookup and insertion.
>> + */
>> +static struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
>> +				struct rb_entry *cached_re,
>> +				unsigned int ofs,
>> +				struct rb_entry **prev_entry,
>> +				struct rb_entry **next_entry,
>> +				struct rb_node ***insert_p,
>> +				struct rb_node **insert_parent)
>> +{
>> +	struct rb_node **pnode = &root->rb_node;
>> +	struct rb_node *parent = NULL, *tmp_node;
>> +	struct rb_entry *re = cached_re;
>> +
>> +	*insert_p = NULL;
>> +	*insert_parent = NULL;
>> +	*prev_entry = NULL;
>> +	*next_entry = NULL;
>> +
>> +	if (RB_EMPTY_ROOT(root))
>> +		return NULL;
>> +
>> +	if (re) {
>> +		if (re->ofs <= ofs && re->ofs + re->len > ofs)
>> +			goto lookup_neighbors;
>> +	}
>> +
>> +	while (*pnode) {
>> +		parent = *pnode;
>> +		re = rb_entry(*pnode, struct rb_entry, rb_node);
>> +
>> +		if (ofs < re->ofs)
>> +			pnode = &(*pnode)->rb_left;
>> +		else if (ofs >= re->ofs + re->len)
>> +			pnode = &(*pnode)->rb_right;
>> +		else
>> +			goto lookup_neighbors;
>> +	}
>> +
>> +	*insert_p = pnode;
>> +	*insert_parent = parent;
>> +
>> +	re = rb_entry(parent, struct rb_entry, rb_node);
>> +	tmp_node = parent;
>> +	if (parent && ofs > re->ofs)
>> +		tmp_node = rb_next(parent);
>> +	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
>> +
>> +	tmp_node = parent;
>> +	if (parent && ofs < re->ofs)
>> +		tmp_node = rb_prev(parent);
>> +	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
>> +	return NULL;
>> +
>> +lookup_neighbors:
>> +	if (ofs == re->ofs) {
>> +		/* lookup prev node for merging backward later */
>> +		tmp_node = rb_prev(&re->rb_node);
>> +		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
>> +	}
>> +	if (ofs == re->ofs + re->len - 1) {
>> +		/* lookup next node for merging frontward later */
>> +		tmp_node = rb_next(&re->rb_node);
>> +		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
>> +	}
>> +	return re;
>> +}
>> +
>>  static struct kmem_cache *extent_tree_slab;
>>  static struct kmem_cache *extent_node_slab;
>>  
>> @@ -102,36 +243,6 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
>>  	return et;
>>  }
>>  
>> -static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
>> -				struct extent_tree *et, unsigned int fofs)
>> -{
>> -	struct rb_node *node = et->root.rb_node;
>> -	struct extent_node *en = et->cached_en;
>> -
>> -	if (en) {
>> -		struct extent_info *cei = &en->ei;
>> -
>> -		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
>> -			stat_inc_cached_node_hit(sbi);
>> -			return en;
>> -		}
>> -	}
>> -
>> -	while (node) {
>> -		en = rb_entry(node, struct extent_node, rb_node);
>> -
>> -		if (fofs < en->ei.fofs) {
>> -			node = node->rb_left;
>> -		} else if (fofs >= en->ei.fofs + en->ei.len) {
>> -			node = node->rb_right;
>> -		} else {
>> -			stat_inc_rbtree_node_hit(sbi);
>> -			return en;
>> -		}
>> -	}
>> -	return NULL;
>> -}
>> -
>>  static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>>  				struct extent_tree *et, struct extent_info *ei)
>>  {
>> @@ -237,17 +348,27 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>  		goto out;
>>  	}
>>  
>> -	en = __lookup_extent_tree(sbi, et, pgofs);
>> +	en = (struct extent_node *)__lookup_rb_tree_fast(
>> +				(struct rb_entry *)et->cached_en, pgofs);
>>  	if (en) {
>> -		*ei = en->ei;
>> -		spin_lock(&sbi->extent_lock);
>> -		if (!list_empty(&en->list)) {
>> -			list_move_tail(&en->list, &sbi->extent_list);
>> -			et->cached_en = en;
>> -		}
>> -		spin_unlock(&sbi->extent_lock);
>> -		ret = true;
>> +		stat_inc_cached_node_hit(sbi);
>> +	} else {
>> +		en = (struct extent_node *)__lookup_rb_tree_slow(&et->root,
>> +								pgofs);
>> +		if (en)
>> +			stat_inc_rbtree_node_hit(sbi);
>> +		else
>> +			goto out;
>>  	}
> 
> Could we use __lookup_rb_tree() like this instead?

Agreed, let me merge this and resend the patch. :)

Thanks,

> 
> ---
>  fs/f2fs/extent_cache.c | 18 +++++++-----------
>  1 file changed, 7 insertions(+), 11 deletions(-)
> 
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index 1a353f8c01d9..9b86d549a829 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -27,7 +27,6 @@ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
>  			return cached_re;
>  		}
>  	}
> -
>  	return NULL;
>  }
>  
> @@ -348,18 +347,15 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		goto out;
>  	}
>  
> -	en = (struct extent_node *)__lookup_rb_tree_fast(
> +	en = (struct extent_node *)__lookup_rb_tree(&et->root,
>  				(struct rb_entry *)et->cached_en, pgofs);
> -	if (en) {
> +	if (!en)
> +		goto out;
> +
> +	if (en == et->cached_en)
>  		stat_inc_cached_node_hit(sbi);
> -	} else {
> -		en = (struct extent_node *)__lookup_rb_tree_slow(&et->root,
> -								pgofs);
> -		if (en)
> -			stat_inc_rbtree_node_hit(sbi);
> -		else
> -			goto out;
> -	}
> +	else
> +		stat_inc_rbtree_node_hit(sbi);
>  
>  	*ei = en->ei;
>  	spin_lock(&sbi->extent_lock);
> 

