All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-07  1:58 ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-07  1:58 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu, Chao Yu

A compressed inode may suffer from read performance issues because it
cannot use the extent cache, so I propose adding this unaligned extent
support to improve it.

Currently, it only works on a read-only format f2fs image.

Unaligned extent: in one compressed cluster, physical block number
will be less than logical block number, so we add an extra physical
block length in extent info in order to indicate such extent status.

The idea is that if all blocks of a cluster are physically contiguous,
then once the cluster's mapping info has been read for the first time,
we cache an unaligned (or aligned) extent info entry in the extent
cache, expecting that the mapping info will be hit when the cluster is
reread.

Merge policy:
- Aligned extents can be merged.
- Aligned extent and unaligned extent can not be merged.

Signed-off-by: Chao Yu <chao@kernel.org>
---

I just post this for comments, it passes compiling, w/o any test.

 fs/f2fs/compress.c     | 25 ++++++++++++
 fs/f2fs/data.c         | 38 +++++++++++++-----
 fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
 fs/f2fs/f2fs.h         | 33 +++++++++++++---
 fs/f2fs/node.c         | 20 ++++++++++
 5 files changed, 181 insertions(+), 25 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 455561826c7d..f072ac33eba5 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
 	f2fs_put_dic(dic);
 }
 
+/*
+ * check whether cluster blocks are contiguous, and add extent cache entry
+ * only if cluster blocks are logically and physically contiguous.
+ */
+int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+{
+	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+	int i = compressed ? 1 : 0;
+	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+		if (!__is_valid_data_blkaddr(blkaddr))
+			break;
+		if (first_blkaddr + i - 1 != blkaddr)
+			return 0;
+	}
+
+	return compressed ? i - 1 : i;
+}
+
+
 const struct address_space_operations f2fs_compress_aops = {
 	.releasepage = f2fs_release_page,
 	.invalidatepage = f2fs_invalidate_page,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d2cf48c5a2e4..9572d78da4d7 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	sector_t last_block_in_file;
 	const unsigned blocksize = blks_to_bytes(inode, 1);
 	struct decompress_io_ctx *dic = NULL;
+	struct extent_info_unaligned eiu;
+	bool extent_cache = false;
 	int i;
 	int ret = 0;
 
@@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	if (f2fs_cluster_is_empty(cc))
 		goto out;
 
-	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
-	if (ret)
-		goto out;
+	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
+		extent_cache = true;
 
-	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
+	if (!extent_cache) {
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+		if (ret)
+			goto out;
+
+		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
+	}
 
 	for (i = 1; i < cc->cluster_size; i++) {
 		block_t blkaddr;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
-						dn.ofs_in_node + i);
+		if (extent_cache)
+			blkaddr = eiu.ei.blk + i;
+		else
+			blkaddr = data_blkaddr(dn.inode, dn.node_page,
+							dn.ofs_in_node + i);
 
 		if (!__is_valid_data_blkaddr(blkaddr))
 			break;
@@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 			goto out_put_dnode;
 		}
 		cc->nr_cpages++;
+
+		if (extent_cache && i >= eiu.plen)
+			break;
 	}
 
 	/* nothing to decompress */
@@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		block_t blkaddr;
 		struct bio_post_read_ctx *ctx;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
+		if (extent_cache)
+			blkaddr = eiu.ei.blk + i + 1;
+		else
+			blkaddr = data_blkaddr(dn.inode, dn.node_page,
 						dn.ofs_in_node + i + 1);
 
 		f2fs_wait_on_block_writeback(inode, blkaddr);
@@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		*last_block_in_bio = blkaddr;
 	}
 
-	f2fs_put_dnode(&dn);
+	if (!extent_cache)
+		f2fs_put_dnode(&dn);
 
 	*bio_ret = bio;
 	return 0;
 
 out_put_dnode:
-	f2fs_put_dnode(&dn);
+	if (!extent_cache)
+		f2fs_put_dnode(&dn);
 out:
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (cc->rpages[i]) {
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 3ebf976a682d..db9de95f90dc 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct rb_node *parent, struct rb_node **p,
-				bool leftmost)
+				bool leftmost, bool unaligned)
 {
 	struct extent_node *en;
 
@@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 	INIT_LIST_HEAD(&en->list);
 	en->et = et;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (unaligned)
+		en->plen = ((struct extent_info_unaligned *)ei)->plen;
+#endif
+
 	rb_link_node(&en->rb_node, parent, p);
 	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
 	atomic_inc(&et->node_cnt);
@@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 	struct rb_node **p = &et->root.rb_root.rb_node;
 	struct extent_node *en;
 
-	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
+	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
 	if (!en)
 		return NULL;
 
@@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 		stat_inc_rbtree_node_hit(sbi);
 
 	*ei = en->ei;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+				!f2fs_sb_has_readonly(sbi)) {
+		struct extent_info_unaligned *eiu =
+				(struct extent_info_unaligned *)ei;
+
+		eiu->plen = en->plen;
+	}
+#endif
+
 	spin_lock(&sbi->extent_lock);
 	if (!list_empty(&en->list)) {
 		list_move_tail(&en->list, &sbi->extent_list);
@@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct extent_node *prev_ex,
-				struct extent_node *next_ex)
+				struct extent_node *next_ex,
+				bool unaligned)
 {
 	struct extent_node *en = NULL;
 
-	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
 		prev_ex->ei.len += ei->len;
 		ei = &prev_ex->ei;
 		en = prev_ex;
 	}
 
-	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
 		next_ex->ei.fofs = ei->fofs;
 		next_ex->ei.blk = ei->blk;
 		next_ex->ei.len += ei->len;
@@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct rb_node **insert_p,
 				struct rb_node *insert_parent,
-				bool leftmost)
+				bool leftmost, bool unaligned)
 {
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
 						ei->fofs, &leftmost);
 do_insert:
-	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
+	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
 	if (!en)
 		return NULL;
 
@@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 						end - dei.fofs + dei.blk,
 						org_end - end);
 				en1 = __insert_extent_tree(sbi, et, &ei,
-							NULL, NULL, true);
+						NULL, NULL, true, false);
 				next_en = en1;
 			} else {
 				en->ei.fofs = end;
@@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 	if (blkaddr) {
 
 		set_extent_info(&ei, fofs, blkaddr, len);
-		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		if (!__try_merge_extent_node(sbi, et, &ei,
+					prev_en, next_en, false))
 			__insert_extent_tree(sbi, et, &ei,
-					insert_p, insert_parent, leftmost);
+				insert_p, insert_parent, leftmost, false);
 
 		/* give up extent_cache, if split and small updates happen */
 		if (dei.len >= 1 &&
@@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
+void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int plen)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info_unaligned eiu;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	bool leftmost = false;
+
+	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
+
+	write_lock(&et->lock);
+
+	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+		write_unlock(&et->lock);
+		return;
+	}
+
+	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+				(struct rb_entry *)et->cached_en, fofs,
+				(struct rb_entry **)&prev_en,
+				(struct rb_entry **)&next_en,
+				&insert_p, &insert_parent, false,
+				&leftmost);
+	f2fs_bug_on(sbi, en);
+
+	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
+	eiu.plen = plen;
+
+	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
+				prev_en, next_en, true))
+		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
+				insert_p, insert_parent, leftmost, true);
+
+	write_unlock(&et->lock);
+}
+
+
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct extent_tree *et, *next;
@@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 	return f2fs_lookup_extent_tree(inode, pgofs, ei);
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
+					struct extent_info_unaligned *eiu)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return false;
+
+	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
+}
+#endif
+
 void f2fs_update_extent_cache(struct dnode_of_data *dn)
 {
 	pgoff_t fofs;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0fe239dd50f4..3a02642a26d4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -578,11 +578,21 @@ struct extent_info {
 	u32 blk;			/* start block address of the extent */
 };
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+struct extent_info_unaligned {
+	struct extent_info ei;		/* extent info */
+	unsigned int plen;		/* physical extent length of compressed blocks */
+};
+#endif
+
 struct extent_node {
 	struct rb_node rb_node;		/* rb node located in rb-tree */
 	struct extent_info ei;		/* extent info */
 	struct list_head list;		/* node in global extent list of sbi */
 	struct extent_tree *et;		/* extent tree pointer */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	unsigned int plen;		/* physical extent length of compressed blocks */
+#endif
 };
 
 struct extent_tree {
@@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
 }
 
 static inline bool __is_extent_mergeable(struct extent_info *back,
-						struct extent_info *front)
+				struct extent_info *front, bool unaligned)
 {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
+	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
+
+	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
+		return false;
+#endif
 	return (back->fofs + back->len == front->fofs &&
 			back->blk + back->len == front->blk);
 }
 
 static inline bool __is_back_mergeable(struct extent_info *cur,
-						struct extent_info *back)
+				struct extent_info *back, bool unaligned)
 {
-	return __is_extent_mergeable(back, cur);
+	return __is_extent_mergeable(back, cur, unaligned);
 }
 
 static inline bool __is_front_mergeable(struct extent_info *cur,
-						struct extent_info *front)
+				struct extent_info *front, bool unaligned)
 {
-	return __is_extent_mergeable(cur, front);
+	return __is_extent_mergeable(cur, front, unaligned);
 }
 
 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
@@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
 		bool force, bool *leftmost);
 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
 				struct rb_root_cached *root, bool check_key);
+void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int plen);
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
 void f2fs_drop_extent_tree(struct inode *inode);
@@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
 void f2fs_destroy_extent_tree(struct inode *inode);
 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 			struct extent_info *ei);
+bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
+					struct extent_info_unaligned *eiu);
 void f2fs_update_extent_cache(struct dnode_of_data *dn);
 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
 			pgoff_t fofs, block_t blkaddr, unsigned int len);
@@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
 void f2fs_put_page_dic(struct page *page);
+int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index dd611efa8aa4..7be2b01caa2a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 	dn->ofs_in_node = offset[level];
 	dn->node_page = npage[level];
 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+			!f2fs_sb_has_readonly(sbi)) {
+		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
+
+		if (blknum) {
+			block_t blkaddr = f2fs_data_blkaddr(dn);
+
+			if (blkaddr == COMPRESS_ADDR)
+				blkaddr = data_blkaddr(dn->inode, dn->node_page,
+							dn->ofs_in_node + 1);
+
+			f2fs_update_extent_tree_range_unaligned(dn->inode,
+					index, blkaddr,
+					F2FS_I(dn->inode)->i_cluster_size,
+					blknum);
+		}
+	}
+#endif
 	return 0;
 
 release_pages:
-- 
2.22.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-07  1:58 ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-07  1:58 UTC (permalink / raw)
  To: jaegeuk; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

A compressed inode may suffer from read performance issues because it
cannot use the extent cache, so I propose adding this unaligned extent
support to improve it.

Currently, it only works on a read-only format f2fs image.

Unaligned extent: in one compressed cluster, physical block number
will be less than logical block number, so we add an extra physical
block length in extent info in order to indicate such extent status.

The idea is that if all blocks of a cluster are physically contiguous,
then once the cluster's mapping info has been read for the first time,
we cache an unaligned (or aligned) extent info entry in the extent
cache, expecting that the mapping info will be hit when the cluster is
reread.

Merge policy:
- Aligned extents can be merged.
- Aligned extent and unaligned extent can not be merged.

Signed-off-by: Chao Yu <chao@kernel.org>
---

I just post this for comments, it passes compiling, w/o any test.

 fs/f2fs/compress.c     | 25 ++++++++++++
 fs/f2fs/data.c         | 38 +++++++++++++-----
 fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
 fs/f2fs/f2fs.h         | 33 +++++++++++++---
 fs/f2fs/node.c         | 20 ++++++++++
 5 files changed, 181 insertions(+), 25 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 455561826c7d..f072ac33eba5 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
 	f2fs_put_dic(dic);
 }
 
+/*
+ * check whether cluster blocks are contiguous, and add extent cache entry
+ * only if cluster blocks are logically and physically contiguous.
+ */
+int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+{
+	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+	int i = compressed ? 1 : 0;
+	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+		if (!__is_valid_data_blkaddr(blkaddr))
+			break;
+		if (first_blkaddr + i - 1 != blkaddr)
+			return 0;
+	}
+
+	return compressed ? i - 1 : i;
+}
+
+
 const struct address_space_operations f2fs_compress_aops = {
 	.releasepage = f2fs_release_page,
 	.invalidatepage = f2fs_invalidate_page,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d2cf48c5a2e4..9572d78da4d7 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	sector_t last_block_in_file;
 	const unsigned blocksize = blks_to_bytes(inode, 1);
 	struct decompress_io_ctx *dic = NULL;
+	struct extent_info_unaligned eiu;
+	bool extent_cache = false;
 	int i;
 	int ret = 0;
 
@@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	if (f2fs_cluster_is_empty(cc))
 		goto out;
 
-	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
-	if (ret)
-		goto out;
+	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
+		extent_cache = true;
 
-	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
+	if (!extent_cache) {
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+		if (ret)
+			goto out;
+
+		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
+	}
 
 	for (i = 1; i < cc->cluster_size; i++) {
 		block_t blkaddr;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
-						dn.ofs_in_node + i);
+		if (extent_cache)
+			blkaddr = eiu.ei.blk + i;
+		else
+			blkaddr = data_blkaddr(dn.inode, dn.node_page,
+							dn.ofs_in_node + i);
 
 		if (!__is_valid_data_blkaddr(blkaddr))
 			break;
@@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 			goto out_put_dnode;
 		}
 		cc->nr_cpages++;
+
+		if (extent_cache && i >= eiu.plen)
+			break;
 	}
 
 	/* nothing to decompress */
@@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		block_t blkaddr;
 		struct bio_post_read_ctx *ctx;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
+		if (extent_cache)
+			blkaddr = eiu.ei.blk + i + 1;
+		else
+			blkaddr = data_blkaddr(dn.inode, dn.node_page,
 						dn.ofs_in_node + i + 1);
 
 		f2fs_wait_on_block_writeback(inode, blkaddr);
@@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		*last_block_in_bio = blkaddr;
 	}
 
-	f2fs_put_dnode(&dn);
+	if (!extent_cache)
+		f2fs_put_dnode(&dn);
 
 	*bio_ret = bio;
 	return 0;
 
 out_put_dnode:
-	f2fs_put_dnode(&dn);
+	if (!extent_cache)
+		f2fs_put_dnode(&dn);
 out:
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (cc->rpages[i]) {
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 3ebf976a682d..db9de95f90dc 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct rb_node *parent, struct rb_node **p,
-				bool leftmost)
+				bool leftmost, bool unaligned)
 {
 	struct extent_node *en;
 
@@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 	INIT_LIST_HEAD(&en->list);
 	en->et = et;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (unaligned)
+		en->plen = ((struct extent_info_unaligned *)ei)->plen;
+#endif
+
 	rb_link_node(&en->rb_node, parent, p);
 	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
 	atomic_inc(&et->node_cnt);
@@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 	struct rb_node **p = &et->root.rb_root.rb_node;
 	struct extent_node *en;
 
-	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
+	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
 	if (!en)
 		return NULL;
 
@@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 		stat_inc_rbtree_node_hit(sbi);
 
 	*ei = en->ei;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+				!f2fs_sb_has_readonly(sbi)) {
+		struct extent_info_unaligned *eiu =
+				(struct extent_info_unaligned *)ei;
+
+		eiu->plen = en->plen;
+	}
+#endif
+
 	spin_lock(&sbi->extent_lock);
 	if (!list_empty(&en->list)) {
 		list_move_tail(&en->list, &sbi->extent_list);
@@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct extent_node *prev_ex,
-				struct extent_node *next_ex)
+				struct extent_node *next_ex,
+				bool unaligned)
 {
 	struct extent_node *en = NULL;
 
-	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
 		prev_ex->ei.len += ei->len;
 		ei = &prev_ex->ei;
 		en = prev_ex;
 	}
 
-	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
 		next_ex->ei.fofs = ei->fofs;
 		next_ex->ei.blk = ei->blk;
 		next_ex->ei.len += ei->len;
@@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct rb_node **insert_p,
 				struct rb_node *insert_parent,
-				bool leftmost)
+				bool leftmost, bool unaligned)
 {
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
 						ei->fofs, &leftmost);
 do_insert:
-	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
+	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
 	if (!en)
 		return NULL;
 
@@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 						end - dei.fofs + dei.blk,
 						org_end - end);
 				en1 = __insert_extent_tree(sbi, et, &ei,
-							NULL, NULL, true);
+						NULL, NULL, true, false);
 				next_en = en1;
 			} else {
 				en->ei.fofs = end;
@@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 	if (blkaddr) {
 
 		set_extent_info(&ei, fofs, blkaddr, len);
-		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		if (!__try_merge_extent_node(sbi, et, &ei,
+					prev_en, next_en, false))
 			__insert_extent_tree(sbi, et, &ei,
-					insert_p, insert_parent, leftmost);
+				insert_p, insert_parent, leftmost, false);
 
 		/* give up extent_cache, if split and small updates happen */
 		if (dei.len >= 1 &&
@@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
+void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int plen)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info_unaligned eiu;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	bool leftmost = false;
+
+	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
+
+	write_lock(&et->lock);
+
+	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+		write_unlock(&et->lock);
+		return;
+	}
+
+	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+				(struct rb_entry *)et->cached_en, fofs,
+				(struct rb_entry **)&prev_en,
+				(struct rb_entry **)&next_en,
+				&insert_p, &insert_parent, false,
+				&leftmost);
+	f2fs_bug_on(sbi, en);
+
+	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
+	eiu.plen = plen;
+
+	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
+				prev_en, next_en, true))
+		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
+				insert_p, insert_parent, leftmost, true);
+
+	write_unlock(&et->lock);
+}
+
+
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct extent_tree *et, *next;
@@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 	return f2fs_lookup_extent_tree(inode, pgofs, ei);
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
+					struct extent_info_unaligned *eiu)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return false;
+
+	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
+}
+#endif
+
 void f2fs_update_extent_cache(struct dnode_of_data *dn)
 {
 	pgoff_t fofs;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0fe239dd50f4..3a02642a26d4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -578,11 +578,21 @@ struct extent_info {
 	u32 blk;			/* start block address of the extent */
 };
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+struct extent_info_unaligned {
+	struct extent_info ei;		/* extent info */
+	unsigned int plen;		/* physical extent length of compressed blocks */
+};
+#endif
+
 struct extent_node {
 	struct rb_node rb_node;		/* rb node located in rb-tree */
 	struct extent_info ei;		/* extent info */
 	struct list_head list;		/* node in global extent list of sbi */
 	struct extent_tree *et;		/* extent tree pointer */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	unsigned int plen;		/* physical extent length of compressed blocks */
+#endif
 };
 
 struct extent_tree {
@@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
 }
 
 static inline bool __is_extent_mergeable(struct extent_info *back,
-						struct extent_info *front)
+				struct extent_info *front, bool unaligned)
 {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
+	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
+
+	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
+		return false;
+#endif
 	return (back->fofs + back->len == front->fofs &&
 			back->blk + back->len == front->blk);
 }
 
 static inline bool __is_back_mergeable(struct extent_info *cur,
-						struct extent_info *back)
+				struct extent_info *back, bool unaligned)
 {
-	return __is_extent_mergeable(back, cur);
+	return __is_extent_mergeable(back, cur, unaligned);
 }
 
 static inline bool __is_front_mergeable(struct extent_info *cur,
-						struct extent_info *front)
+				struct extent_info *front, bool unaligned)
 {
-	return __is_extent_mergeable(cur, front);
+	return __is_extent_mergeable(cur, front, unaligned);
 }
 
 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
@@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
 		bool force, bool *leftmost);
 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
 				struct rb_root_cached *root, bool check_key);
+void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int plen);
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
 void f2fs_drop_extent_tree(struct inode *inode);
@@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
 void f2fs_destroy_extent_tree(struct inode *inode);
 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 			struct extent_info *ei);
+bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
+					struct extent_info_unaligned *eiu);
 void f2fs_update_extent_cache(struct dnode_of_data *dn);
 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
 			pgoff_t fofs, block_t blkaddr, unsigned int len);
@@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
 void f2fs_put_page_dic(struct page *page);
+int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index dd611efa8aa4..7be2b01caa2a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 	dn->ofs_in_node = offset[level];
 	dn->node_page = npage[level];
 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+			!f2fs_sb_has_readonly(sbi)) {
+		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
+
+		if (blknum) {
+			block_t blkaddr = f2fs_data_blkaddr(dn);
+
+			if (blkaddr == COMPRESS_ADDR)
+				blkaddr = data_blkaddr(dn->inode, dn->node_page,
+							dn->ofs_in_node + 1);
+
+			f2fs_update_extent_tree_range_unaligned(dn->inode,
+					index, blkaddr,
+					F2FS_I(dn->inode)->i_cluster_size,
+					blknum);
+		}
+	}
+#endif
 	return 0;
 
 release_pages:
-- 
2.22.1



_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-07  1:58 ` [f2fs-dev] " Chao Yu
  (?)
@ 2021-07-07  3:46 ` kernel test robot
  -1 siblings, 0 replies; 25+ messages in thread
From: kernel test robot @ 2021-07-07  3:46 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 4177 bytes --]

Hi Chao,

[FYI, it's a private test report for your RFC patch.]
[auto build test ERROR on f2fs/dev-test]
[also build test ERROR on next-20210706]
[cannot apply to v5.13]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Chao-Yu/f2fs-extent-cache-support-unaligned-extent/20210707-095906
base:   https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git dev-test
config: powerpc64-randconfig-m031-20210706 (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/50c0cf63d3708f88a8bb464377b3ac83149be510
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Chao-Yu/f2fs-extent-cache-support-unaligned-extent/20210707-095906
        git checkout 50c0cf63d3708f88a8bb464377b3ac83149be510
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross O=build_dir ARCH=powerpc SHELL=/bin/bash fs/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All error/warnings (new ones prefixed by >>):

   In file included from fs/f2fs/dir.c:13:
>> fs/f2fs/f2fs.h:3998:13: warning: 'struct extent_info_unaligned' declared inside parameter list will not be visible outside of this definition or declaration
    3998 |      struct extent_info_unaligned *eiu);
         |             ^~~~~~~~~~~~~~~~~~~~~
--
   In file included from fs/f2fs/extent_cache.c:14:
>> fs/f2fs/f2fs.h:3998:13: warning: 'struct extent_info_unaligned' declared inside parameter list will not be visible outside of this definition or declaration
    3998 |      struct extent_info_unaligned *eiu);
         |             ^~~~~~~~~~~~~~~~~~~~~
   fs/f2fs/extent_cache.c: In function 'f2fs_update_extent_tree_range_unaligned':
>> fs/f2fs/extent_cache.c:690:31: error: storage size of 'eiu' isn't known
     690 |  struct extent_info_unaligned eiu;
         |                               ^~~
   fs/f2fs/extent_cache.c:690:31: warning: unused variable 'eiu' [-Wunused-variable]


vim +690 fs/f2fs/extent_cache.c

   681	
   682	void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
   683					pgoff_t fofs, block_t blkaddr, unsigned int llen,
   684					unsigned int plen)
   685	{
   686		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
   687		struct extent_tree *et = F2FS_I(inode)->extent_tree;
   688		struct extent_node *en = NULL;
   689		struct extent_node *prev_en = NULL, *next_en = NULL;
 > 690		struct extent_info_unaligned eiu;
   691		struct rb_node **insert_p = NULL, *insert_parent = NULL;
   692		bool leftmost = false;
   693	
   694		trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
   695	
   696		write_lock(&et->lock);
   697	
   698		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
   699			write_unlock(&et->lock);
   700			return;
   701		}
   702	
   703		en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
   704					(struct rb_entry *)et->cached_en, fofs,
   705					(struct rb_entry **)&prev_en,
   706					(struct rb_entry **)&next_en,
   707					&insert_p, &insert_parent, false,
   708					&leftmost);
   709		f2fs_bug_on(sbi, en);
   710	
   711		set_extent_info(&eiu.ei, fofs, blkaddr, llen);
   712		eiu.plen = plen;
   713	
   714		if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
   715					prev_en, next_en, true))
   716			__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
   717					insert_p, insert_parent, leftmost, true);
   718	
   719		write_unlock(&et->lock);
   720	}
   721	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 28450 bytes --]

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-07  1:58 ` [f2fs-dev] " Chao Yu
@ 2021-07-14  2:27   ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-14  2:27 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/7/7 9:58, Chao Yu wrote:
> Compressed inodes may suffer a read performance issue because they cannot
> use the extent cache, so I propose adding this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is that if all blocks of one cluster are physically contiguous,
> once its mapping info has been read for the first time, we will cache an
> unaligned (or aligned) extent info entry in the extent cache, expecting
> that the mapping info will be hit when rereading the cluster.
> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>   fs/f2fs/compress.c     | 25 ++++++++++++
>   fs/f2fs/data.c         | 38 +++++++++++++-----
>   fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>   fs/f2fs/f2fs.h         | 33 +++++++++++++---
>   fs/f2fs/node.c         | 20 ++++++++++
>   5 files changed, 181 insertions(+), 25 deletions(-)

Jaegeuk, any thoughts about this idea?

Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-14  2:27   ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-14  2:27 UTC (permalink / raw)
  To: jaegeuk; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/7/7 9:58, Chao Yu wrote:
> Compressed inodes may suffer a read performance issue because they cannot
> use the extent cache, so I propose adding this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is that if all blocks of one cluster are physically contiguous,
> once its mapping info has been read for the first time, we will cache an
> unaligned (or aligned) extent info entry in the extent cache, expecting
> that the mapping info will be hit when rereading the cluster.
> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>   fs/f2fs/compress.c     | 25 ++++++++++++
>   fs/f2fs/data.c         | 38 +++++++++++++-----
>   fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>   fs/f2fs/f2fs.h         | 33 +++++++++++++---
>   fs/f2fs/node.c         | 20 ++++++++++
>   5 files changed, 181 insertions(+), 25 deletions(-)

Jaegeuk, any thoughts about this idea?

Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-07  1:58 ` [f2fs-dev] " Chao Yu
@ 2021-07-19 18:36   ` Jaegeuk Kim
  -1 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-07-19 18:36 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 07/07, Chao Yu wrote:
> Compressed inodes may suffer a read performance issue because they cannot
> use the extent cache, so I propose adding this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is that if all blocks of one cluster are physically contiguous,
> once its mapping info has been read for the first time, we will cache an
> unaligned (or aligned) extent info entry in the extent cache, expecting
> that the mapping info will be hit when rereading the cluster.

How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
like RO case?

> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>  fs/f2fs/compress.c     | 25 ++++++++++++
>  fs/f2fs/data.c         | 38 +++++++++++++-----
>  fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>  fs/f2fs/f2fs.h         | 33 +++++++++++++---
>  fs/f2fs/node.c         | 20 ++++++++++
>  5 files changed, 181 insertions(+), 25 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..f072ac33eba5 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>  	f2fs_put_dic(dic);
>  }
>  
> +/*
> + * check whether cluster blocks are contiguous, and add extent cache entry
> + * only if cluster blocks are logically and physically contiguous.
> + */
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
> +{
> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
> +	int i = compressed ? 1 : 0;
> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +		if (!__is_valid_data_blkaddr(blkaddr))
> +			break;
> +		if (first_blkaddr + i - 1 != blkaddr)
> +			return 0;
> +	}
> +
> +	return compressed ? i - 1 : i;
> +}
> +
> +
>  const struct address_space_operations f2fs_compress_aops = {
>  	.releasepage = f2fs_release_page,
>  	.invalidatepage = f2fs_invalidate_page,
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index d2cf48c5a2e4..9572d78da4d7 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	sector_t last_block_in_file;
>  	const unsigned blocksize = blks_to_bytes(inode, 1);
>  	struct decompress_io_ctx *dic = NULL;
> +	struct extent_info_unaligned eiu;
> +	bool extent_cache = false;
>  	int i;
>  	int ret = 0;
>  
> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	if (f2fs_cluster_is_empty(cc))
>  		goto out;
>  
> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> -	if (ret)
> -		goto out;
> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
> +		extent_cache = true;
>  
> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	if (!extent_cache) {
> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> +		if (ret)
> +			goto out;
> +
> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	}
>  
>  	for (i = 1; i < cc->cluster_size; i++) {
>  		block_t blkaddr;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> -						dn.ofs_in_node + i);
> +		if (extent_cache)
> +			blkaddr = eiu.ei.blk + i;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +							dn.ofs_in_node + i);
>  
>  		if (!__is_valid_data_blkaddr(blkaddr))
>  			break;
> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  			goto out_put_dnode;
>  		}
>  		cc->nr_cpages++;
> +
> +		if (extent_cache && i >= eiu.plen)
> +			break;
>  	}
>  
>  	/* nothing to decompress */
> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		block_t blkaddr;
>  		struct bio_post_read_ctx *ctx;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +		if (extent_cache)
> +			blkaddr = eiu.plen + i + 1;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>  						dn.ofs_in_node + i + 1);
>  
>  		f2fs_wait_on_block_writeback(inode, blkaddr);
> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		*last_block_in_bio = blkaddr;
>  	}
>  
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  
>  	*bio_ret = bio;
>  	return 0;
>  
>  out_put_dnode:
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  out:
>  	for (i = 0; i < cc->cluster_size; i++) {
>  		if (cc->rpages[i]) {
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index 3ebf976a682d..db9de95f90dc 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node *parent, struct rb_node **p,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct extent_node *en;
>  
> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  	INIT_LIST_HEAD(&en->list);
>  	en->et = et;
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (unaligned)
> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
> +#endif
> +
>  	rb_link_node(&en->rb_node, parent, p);
>  	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>  	atomic_inc(&et->node_cnt);
> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>  	struct rb_node **p = &et->root.rb_root.rb_node;
>  	struct extent_node *en;
>  
> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>  	if (!en)
>  		return NULL;
>  
> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		stat_inc_rbtree_node_hit(sbi);
>  
>  	*ei = en->ei;
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
> +				!f2fs_sb_has_readonly(sbi)) {
> +		struct extent_info_unaligned *eiu =
> +				(struct extent_info_unaligned *)ei;
> +
> +		eiu->plen = en->plen;
> +	}
> +#endif
> +
>  	spin_lock(&sbi->extent_lock);
>  	if (!list_empty(&en->list)) {
>  		list_move_tail(&en->list, &sbi->extent_list);
> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct extent_node *prev_ex,
> -				struct extent_node *next_ex)
> +				struct extent_node *next_ex,
> +				bool unaligned)
>  {
>  	struct extent_node *en = NULL;
>  
> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>  		prev_ex->ei.len += ei->len;
>  		ei = &prev_ex->ei;
>  		en = prev_ex;
>  	}
>  
> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>  		next_ex->ei.fofs = ei->fofs;
>  		next_ex->ei.blk = ei->blk;
>  		next_ex->ei.len += ei->len;
> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node **insert_p,
>  				struct rb_node *insert_parent,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct rb_node **p;
>  	struct rb_node *parent = NULL;
> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>  						ei->fofs, &leftmost);
>  do_insert:
> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>  	if (!en)
>  		return NULL;
>  
> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  						end - dei.fofs + dei.blk,
>  						org_end - end);
>  				en1 = __insert_extent_tree(sbi, et, &ei,
> -							NULL, NULL, true);
> +						NULL, NULL, true, false);
>  				next_en = en1;
>  			} else {
>  				en->ei.fofs = end;
> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  	if (blkaddr) {
>  
>  		set_extent_info(&ei, fofs, blkaddr, len);
> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
> +		if (!__try_merge_extent_node(sbi, et, &ei,
> +					prev_en, next_en, false))
>  			__insert_extent_tree(sbi, et, &ei,
> -					insert_p, insert_parent, leftmost);
> +				insert_p, insert_parent, leftmost, false);
>  
>  		/* give up extent_cache, if split and small updates happen */
>  		if (dei.len >= 1 &&
> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  		f2fs_mark_inode_dirty_sync(inode, true);
>  }
>  
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
> +	struct extent_node *en = NULL;
> +	struct extent_node *prev_en = NULL, *next_en = NULL;
> +	struct extent_info_unaligned eiu;
> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
> +	bool leftmost = false;
> +
> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
> +
> +	write_lock(&et->lock);
> +
> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
> +		write_unlock(&et->lock);
> +		return;
> +	}
> +
> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
> +				(struct rb_entry *)et->cached_en, fofs,
> +				(struct rb_entry **)&prev_en,
> +				(struct rb_entry **)&next_en,
> +				&insert_p, &insert_parent, false,
> +				&leftmost);
> +	f2fs_bug_on(sbi, en);
> +
> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
> +	eiu.plen = plen;
> +
> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
> +				prev_en, next_en, true))
> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
> +				insert_p, insert_parent, leftmost, true);
> +
> +	write_unlock(&et->lock);
> +}
> +
> +
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>  {
>  	struct extent_tree *et, *next;
> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>  }
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu)
> +{
> +	if (!f2fs_may_extent_tree(inode))
> +		return false;
> +
> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
> +}
> +#endif
> +
>  void f2fs_update_extent_cache(struct dnode_of_data *dn)
>  {
>  	pgoff_t fofs;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 0fe239dd50f4..3a02642a26d4 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -578,11 +578,21 @@ struct extent_info {
>  	u32 blk;			/* start block address of the extent */
>  };
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +struct extent_info_unaligned {
> +	struct extent_info ei;		/* extent info */
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +};
> +#endif
> +
>  struct extent_node {
>  	struct rb_node rb_node;		/* rb node located in rb-tree */
>  	struct extent_info ei;		/* extent info */
>  	struct list_head list;		/* node in global extent list of sbi */
>  	struct extent_tree *et;		/* extent tree pointer */
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +#endif
>  };
>  
>  struct extent_tree {
> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>  }
>  
>  static inline bool __is_extent_mergeable(struct extent_info *back,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
> +
> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
> +		return false;
> +#endif
>  	return (back->fofs + back->len == front->fofs &&
>  			back->blk + back->len == front->blk);
>  }
>  
>  static inline bool __is_back_mergeable(struct extent_info *cur,
> -						struct extent_info *back)
> +				struct extent_info *back, bool unaligned)
>  {
> -	return __is_extent_mergeable(back, cur);
> +	return __is_extent_mergeable(back, cur, unaligned);
>  }
>  
>  static inline bool __is_front_mergeable(struct extent_info *cur,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> -	return __is_extent_mergeable(cur, front);
> +	return __is_extent_mergeable(cur, front, unaligned);
>  }
>  
>  extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>  		bool force, bool *leftmost);
>  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>  				struct rb_root_cached *root, bool check_key);
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen);
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>  void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>  void f2fs_drop_extent_tree(struct inode *inode);
> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>  void f2fs_destroy_extent_tree(struct inode *inode);
>  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  			struct extent_info *ei);
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu);
>  void f2fs_update_extent_cache(struct dnode_of_data *dn);
>  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>  			pgoff_t fofs, block_t blkaddr, unsigned int len);
> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>  void f2fs_put_page_dic(struct page *page);
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>  int f2fs_init_compress_ctx(struct compress_ctx *cc);
>  void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index dd611efa8aa4..7be2b01caa2a 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>  	dn->ofs_in_node = offset[level];
>  	dn->node_page = npage[level];
>  	dn->data_blkaddr = f2fs_data_blkaddr(dn);
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +			!f2fs_sb_has_readonly(sbi)) {
> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
> +
> +		if (blknum) {
> +			block_t blkaddr = f2fs_data_blkaddr(dn);
> +
> +			if (blkaddr == COMPRESS_ADDR)
> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +							dn->ofs_in_node + 1);
> +
> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
> +					index, blkaddr,
> +					F2FS_I(dn->inode)->i_cluster_size,
> +					blknum);
> +		}
> +	}
> +#endif
>  	return 0;
>  
>  release_pages:
> -- 
> 2.22.1

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-19 18:36   ` Jaegeuk Kim
  0 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-07-19 18:36 UTC (permalink / raw)
  To: Chao Yu; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 07/07, Chao Yu wrote:
> Compressed inodes may suffer a read performance issue because they cannot
> use the extent cache, so I propose adding this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is that if all blocks of one cluster are physically contiguous,
> once its mapping info has been read for the first time, we will cache an
> unaligned (or aligned) extent info entry in the extent cache, expecting
> that the mapping info will be hit when rereading the cluster.

How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
like RO case?

> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>  fs/f2fs/compress.c     | 25 ++++++++++++
>  fs/f2fs/data.c         | 38 +++++++++++++-----
>  fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>  fs/f2fs/f2fs.h         | 33 +++++++++++++---
>  fs/f2fs/node.c         | 20 ++++++++++
>  5 files changed, 181 insertions(+), 25 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..f072ac33eba5 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>  	f2fs_put_dic(dic);
>  }
>  
> +/*
> + * check whether cluster blocks are contiguous, and add extent cache entry
> + * only if cluster blocks are logically and physically contiguous.
> + */
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
> +{
> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
> +	int i = compressed ? 1 : 0;
> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +		if (!__is_valid_data_blkaddr(blkaddr))
> +			break;
> +		if (first_blkaddr + i - 1 != blkaddr)
> +			return 0;
> +	}
> +
> +	return compressed ? i - 1 : i;
> +}
> +
> +
>  const struct address_space_operations f2fs_compress_aops = {
>  	.releasepage = f2fs_release_page,
>  	.invalidatepage = f2fs_invalidate_page,
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index d2cf48c5a2e4..9572d78da4d7 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	sector_t last_block_in_file;
>  	const unsigned blocksize = blks_to_bytes(inode, 1);
>  	struct decompress_io_ctx *dic = NULL;
> +	struct extent_info_unaligned eiu;
> +	bool extent_cache = false;
>  	int i;
>  	int ret = 0;
>  
> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	if (f2fs_cluster_is_empty(cc))
>  		goto out;
>  
> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> -	if (ret)
> -		goto out;
> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
> +		extent_cache = true;
>  
> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	if (!extent_cache) {
> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> +		if (ret)
> +			goto out;
> +
> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	}
>  
>  	for (i = 1; i < cc->cluster_size; i++) {
>  		block_t blkaddr;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> -						dn.ofs_in_node + i);
> +		if (extent_cache)
> +			blkaddr = eiu.ei.blk + i;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +							dn.ofs_in_node + i);
>  
>  		if (!__is_valid_data_blkaddr(blkaddr))
>  			break;
> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  			goto out_put_dnode;
>  		}
>  		cc->nr_cpages++;
> +
> +		if (extent_cache && i >= eiu.plen)
> +			break;
>  	}
>  
>  	/* nothing to decompress */
> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		block_t blkaddr;
>  		struct bio_post_read_ctx *ctx;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +		if (extent_cache)
> +			blkaddr = eiu.plen + i + 1;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>  						dn.ofs_in_node + i + 1);
>  
>  		f2fs_wait_on_block_writeback(inode, blkaddr);
> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		*last_block_in_bio = blkaddr;
>  	}
>  
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  
>  	*bio_ret = bio;
>  	return 0;
>  
>  out_put_dnode:
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  out:
>  	for (i = 0; i < cc->cluster_size; i++) {
>  		if (cc->rpages[i]) {
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index 3ebf976a682d..db9de95f90dc 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node *parent, struct rb_node **p,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct extent_node *en;
>  
> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  	INIT_LIST_HEAD(&en->list);
>  	en->et = et;
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (unaligned)
> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
> +#endif
> +
>  	rb_link_node(&en->rb_node, parent, p);
>  	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>  	atomic_inc(&et->node_cnt);
> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>  	struct rb_node **p = &et->root.rb_root.rb_node;
>  	struct extent_node *en;
>  
> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>  	if (!en)
>  		return NULL;
>  
> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		stat_inc_rbtree_node_hit(sbi);
>  
>  	*ei = en->ei;
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
> +				!f2fs_sb_has_readonly(sbi)) {
> +		struct extent_info_unaligned *eiu =
> +				(struct extent_info_unaligned *)ei;
> +
> +		eiu->plen = en->plen;
> +	}
> +#endif
> +
>  	spin_lock(&sbi->extent_lock);
>  	if (!list_empty(&en->list)) {
>  		list_move_tail(&en->list, &sbi->extent_list);
> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct extent_node *prev_ex,
> -				struct extent_node *next_ex)
> +				struct extent_node *next_ex,
> +				bool unaligned)
>  {
>  	struct extent_node *en = NULL;
>  
> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>  		prev_ex->ei.len += ei->len;
>  		ei = &prev_ex->ei;
>  		en = prev_ex;
>  	}
>  
> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>  		next_ex->ei.fofs = ei->fofs;
>  		next_ex->ei.blk = ei->blk;
>  		next_ex->ei.len += ei->len;
> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node **insert_p,
>  				struct rb_node *insert_parent,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct rb_node **p;
>  	struct rb_node *parent = NULL;
> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>  						ei->fofs, &leftmost);
>  do_insert:
> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>  	if (!en)
>  		return NULL;
>  
> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  						end - dei.fofs + dei.blk,
>  						org_end - end);
>  				en1 = __insert_extent_tree(sbi, et, &ei,
> -							NULL, NULL, true);
> +						NULL, NULL, true, false);
>  				next_en = en1;
>  			} else {
>  				en->ei.fofs = end;
> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  	if (blkaddr) {
>  
>  		set_extent_info(&ei, fofs, blkaddr, len);
> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
> +		if (!__try_merge_extent_node(sbi, et, &ei,
> +					prev_en, next_en, false))
>  			__insert_extent_tree(sbi, et, &ei,
> -					insert_p, insert_parent, leftmost);
> +				insert_p, insert_parent, leftmost, false);
>  
>  		/* give up extent_cache, if split and small updates happen */
>  		if (dei.len >= 1 &&
> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  		f2fs_mark_inode_dirty_sync(inode, true);
>  }
>  
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
> +	struct extent_node *en = NULL;
> +	struct extent_node *prev_en = NULL, *next_en = NULL;
> +	struct extent_info_unaligned eiu;
> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
> +	bool leftmost = false;
> +
> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
> +
> +	write_lock(&et->lock);
> +
> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
> +		write_unlock(&et->lock);
> +		return;
> +	}
> +
> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
> +				(struct rb_entry *)et->cached_en, fofs,
> +				(struct rb_entry **)&prev_en,
> +				(struct rb_entry **)&next_en,
> +				&insert_p, &insert_parent, false,
> +				&leftmost);
> +	f2fs_bug_on(sbi, en);
> +
> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
> +	eiu.plen = plen;
> +
> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
> +				prev_en, next_en, true))
> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
> +				insert_p, insert_parent, leftmost, true);
> +
> +	write_unlock(&et->lock);
> +}
> +
> +
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>  {
>  	struct extent_tree *et, *next;
> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>  }
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu)
> +{
> +	if (!f2fs_may_extent_tree(inode))
> +		return false;
> +
> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
> +}
> +#endif
> +
>  void f2fs_update_extent_cache(struct dnode_of_data *dn)
>  {
>  	pgoff_t fofs;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 0fe239dd50f4..3a02642a26d4 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -578,11 +578,21 @@ struct extent_info {
>  	u32 blk;			/* start block address of the extent */
>  };
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +struct extent_info_unaligned {
> +	struct extent_info ei;		/* extent info */
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +};
> +#endif
> +
>  struct extent_node {
>  	struct rb_node rb_node;		/* rb node located in rb-tree */
>  	struct extent_info ei;		/* extent info */
>  	struct list_head list;		/* node in global extent list of sbi */
>  	struct extent_tree *et;		/* extent tree pointer */
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +#endif
>  };
>  
>  struct extent_tree {
> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>  }
>  
>  static inline bool __is_extent_mergeable(struct extent_info *back,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
> +
> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
> +		return false;
> +#endif
>  	return (back->fofs + back->len == front->fofs &&
>  			back->blk + back->len == front->blk);
>  }
>  
>  static inline bool __is_back_mergeable(struct extent_info *cur,
> -						struct extent_info *back)
> +				struct extent_info *back, bool unaligned)
>  {
> -	return __is_extent_mergeable(back, cur);
> +	return __is_extent_mergeable(back, cur, unaligned);
>  }
>  
>  static inline bool __is_front_mergeable(struct extent_info *cur,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> -	return __is_extent_mergeable(cur, front);
> +	return __is_extent_mergeable(cur, front, unaligned);
>  }
>  
>  extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>  		bool force, bool *leftmost);
>  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>  				struct rb_root_cached *root, bool check_key);
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen);
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>  void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>  void f2fs_drop_extent_tree(struct inode *inode);
> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>  void f2fs_destroy_extent_tree(struct inode *inode);
>  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  			struct extent_info *ei);
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu);
>  void f2fs_update_extent_cache(struct dnode_of_data *dn);
>  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>  			pgoff_t fofs, block_t blkaddr, unsigned int len);
> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>  void f2fs_put_page_dic(struct page *page);
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>  int f2fs_init_compress_ctx(struct compress_ctx *cc);
>  void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index dd611efa8aa4..7be2b01caa2a 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>  	dn->ofs_in_node = offset[level];
>  	dn->node_page = npage[level];
>  	dn->data_blkaddr = f2fs_data_blkaddr(dn);
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +			!f2fs_sb_has_readonly(sbi)) {
> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
> +
> +		if (blknum) {
> +			block_t blkaddr = f2fs_data_blkaddr(dn);
> +
> +			if (blkaddr == COMPRESS_ADDR)
> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +							dn->ofs_in_node + 1);
> +
> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
> +					index, blkaddr,
> +					F2FS_I(dn->inode)->i_cluster_size,
> +					blknum);
> +		}
> +	}
> +#endif
>  	return 0;
>  
>  release_pages:
> -- 
> 2.22.1


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-19 18:36   ` [f2fs-dev] " Jaegeuk Kim
@ 2021-07-20  0:39     ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-20  0:39 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/7/20 2:36, Jaegeuk Kim wrote:
> On 07/07, Chao Yu wrote:
>> Compressed inode may suffer read performance issue due to it can not
>> use extent cache, so I propose to add this unaligned extent support
>> to improve it.
>>
>> Currently, it only works in readonly format f2fs image.
>>
>> Unaligned extent: in one compressed cluster, physical block number
>> will be less than logical block number, so we add an extra physical
>> block length in extent info in order to indicate such extent status.
>>
>> The idea is if one whole cluster blocks are contiguous physically,
>> once its mapping info was readed at first time, we will cache an
>> unaligned (or aligned) extent info entry in extent cache, it expects
>> that the mapping info will be hitted when rereading cluster.
> 
> How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
> like RO case?

IIUC, update_largest_extent() in sload tries to find the largest non-compressed cluster,
and persists its extent into the inode. For a compressed file, a non-compressed cluster is
not a common case, so I guess it's valuable to handle compressed cluster mapping
in the extent cache to enhance read performance, especially for an inode which has a
large cluster size.

Let me know if I misunderstood what you mean.

Thanks,

> 
>>
>> Merge policy:
>> - Aligned extents can be merged.
>> - Aligned extent and unaligned extent can not be merged.
>>
>> Signed-off-by: Chao Yu <chao@kernel.org>
>> ---
>>
>> I just post this for comments, it passes compiling, w/o any test.
>>
>>   fs/f2fs/compress.c     | 25 ++++++++++++
>>   fs/f2fs/data.c         | 38 +++++++++++++-----
>>   fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>>   fs/f2fs/f2fs.h         | 33 +++++++++++++---
>>   fs/f2fs/node.c         | 20 ++++++++++
>>   5 files changed, 181 insertions(+), 25 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 455561826c7d..f072ac33eba5 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>>   	f2fs_put_dic(dic);
>>   }
>>   
>> +/*
>> + * check whether cluster blocks are contiguous, and add extent cache entry
>> + * only if cluster blocks are logically and physically contiguous.
>> + */
>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
>> +{
>> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
>> +	int i = compressed ? 1 : 0;
>> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +						dn->ofs_in_node + i);
>> +
>> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
>> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +						dn->ofs_in_node + i);
>> +
>> +		if (!__is_valid_data_blkaddr(blkaddr))
>> +			break;
>> +		if (first_blkaddr + i - 1 != blkaddr)
>> +			return 0;
>> +	}
>> +
>> +	return compressed ? i - 1 : i;
>> +}
>> +
>> +
>>   const struct address_space_operations f2fs_compress_aops = {
>>   	.releasepage = f2fs_release_page,
>>   	.invalidatepage = f2fs_invalidate_page,
>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>> index d2cf48c5a2e4..9572d78da4d7 100644
>> --- a/fs/f2fs/data.c
>> +++ b/fs/f2fs/data.c
>> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   	sector_t last_block_in_file;
>>   	const unsigned blocksize = blks_to_bytes(inode, 1);
>>   	struct decompress_io_ctx *dic = NULL;
>> +	struct extent_info_unaligned eiu;
>> +	bool extent_cache = false;
>>   	int i;
>>   	int ret = 0;
>>   
>> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   	if (f2fs_cluster_is_empty(cc))
>>   		goto out;
>>   
>> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
>> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>> -	if (ret)
>> -		goto out;
>> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
>> +		extent_cache = true;
>>   
>> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>> +	if (!extent_cache) {
>> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
>> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>> +		if (ret)
>> +			goto out;
>> +
>> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>> +	}
>>   
>>   	for (i = 1; i < cc->cluster_size; i++) {
>>   		block_t blkaddr;
>>   
>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> -						dn.ofs_in_node + i);
>> +		if (extent_cache)
>> +			blkaddr = eiu.ei.blk + i;
>> +		else
>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> +							dn.ofs_in_node + i);
>>   
>>   		if (!__is_valid_data_blkaddr(blkaddr))
>>   			break;
>> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   			goto out_put_dnode;
>>   		}
>>   		cc->nr_cpages++;
>> +
>> +		if (extent_cache && i >= eiu.plen)
>> +			break;
>>   	}
>>   
>>   	/* nothing to decompress */
>> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   		block_t blkaddr;
>>   		struct bio_post_read_ctx *ctx;
>>   
>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> +		if (extent_cache)
>> +			blkaddr = eiu.plen + i + 1;
>> +		else
>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>   						dn.ofs_in_node + i + 1);
>>   
>>   		f2fs_wait_on_block_writeback(inode, blkaddr);
>> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   		*last_block_in_bio = blkaddr;
>>   	}
>>   
>> -	f2fs_put_dnode(&dn);
>> +	if (!extent_cache)
>> +		f2fs_put_dnode(&dn);
>>   
>>   	*bio_ret = bio;
>>   	return 0;
>>   
>>   out_put_dnode:
>> -	f2fs_put_dnode(&dn);
>> +	if (!extent_cache)
>> +		f2fs_put_dnode(&dn);
>>   out:
>>   	for (i = 0; i < cc->cluster_size; i++) {
>>   		if (cc->rpages[i]) {
>> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
>> index 3ebf976a682d..db9de95f90dc 100644
>> --- a/fs/f2fs/extent_cache.c
>> +++ b/fs/f2fs/extent_cache.c
>> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>>   static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct rb_node *parent, struct rb_node **p,
>> -				bool leftmost)
>> +				bool leftmost, bool unaligned)
>>   {
>>   	struct extent_node *en;
>>   
>> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>   	INIT_LIST_HEAD(&en->list);
>>   	en->et = et;
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (unaligned)
>> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
>> +#endif
>> +
>>   	rb_link_node(&en->rb_node, parent, p);
>>   	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>>   	atomic_inc(&et->node_cnt);
>> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>>   	struct rb_node **p = &et->root.rb_root.rb_node;
>>   	struct extent_node *en;
>>   
>> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
>> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>>   	if (!en)
>>   		return NULL;
>>   
>> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>   		stat_inc_rbtree_node_hit(sbi);
>>   
>>   	*ei = en->ei;
>> +
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
>> +				!f2fs_sb_has_readonly(sbi)) {
>> +		struct extent_info_unaligned *eiu =
>> +				(struct extent_info_unaligned *)ei;
>> +
>> +		eiu->plen = en->plen;
>> +	}
>> +#endif
>> +
>>   	spin_lock(&sbi->extent_lock);
>>   	if (!list_empty(&en->list)) {
>>   		list_move_tail(&en->list, &sbi->extent_list);
>> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>   static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct extent_node *prev_ex,
>> -				struct extent_node *next_ex)
>> +				struct extent_node *next_ex,
>> +				bool unaligned)
>>   {
>>   	struct extent_node *en = NULL;
>>   
>> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
>> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>>   		prev_ex->ei.len += ei->len;
>>   		ei = &prev_ex->ei;
>>   		en = prev_ex;
>>   	}
>>   
>> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
>> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>>   		next_ex->ei.fofs = ei->fofs;
>>   		next_ex->ei.blk = ei->blk;
>>   		next_ex->ei.len += ei->len;
>> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct rb_node **insert_p,
>>   				struct rb_node *insert_parent,
>> -				bool leftmost)
>> +				bool leftmost, bool unaligned)
>>   {
>>   	struct rb_node **p;
>>   	struct rb_node *parent = NULL;
>> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>   	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>>   						ei->fofs, &leftmost);
>>   do_insert:
>> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
>> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>>   	if (!en)
>>   		return NULL;
>>   
>> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   						end - dei.fofs + dei.blk,
>>   						org_end - end);
>>   				en1 = __insert_extent_tree(sbi, et, &ei,
>> -							NULL, NULL, true);
>> +						NULL, NULL, true, false);
>>   				next_en = en1;
>>   			} else {
>>   				en->ei.fofs = end;
>> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   	if (blkaddr) {
>>   
>>   		set_extent_info(&ei, fofs, blkaddr, len);
>> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
>> +		if (!__try_merge_extent_node(sbi, et, &ei,
>> +					prev_en, next_en, false))
>>   			__insert_extent_tree(sbi, et, &ei,
>> -					insert_p, insert_parent, leftmost);
>> +				insert_p, insert_parent, leftmost, false);
>>   
>>   		/* give up extent_cache, if split and small updates happen */
>>   		if (dei.len >= 1 &&
>> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   		f2fs_mark_inode_dirty_sync(inode, true);
>>   }
>>   
>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>> +				unsigned int plen)
>> +{
>> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
>> +	struct extent_node *en = NULL;
>> +	struct extent_node *prev_en = NULL, *next_en = NULL;
>> +	struct extent_info_unaligned eiu;
>> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
>> +	bool leftmost = false;
>> +
>> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
>> +
>> +	write_lock(&et->lock);
>> +
>> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
>> +		write_unlock(&et->lock);
>> +		return;
>> +	}
>> +
>> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
>> +				(struct rb_entry *)et->cached_en, fofs,
>> +				(struct rb_entry **)&prev_en,
>> +				(struct rb_entry **)&next_en,
>> +				&insert_p, &insert_parent, false,
>> +				&leftmost);
>> +	f2fs_bug_on(sbi, en);
>> +
>> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
>> +	eiu.plen = plen;
>> +
>> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
>> +				prev_en, next_en, true))
>> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
>> +				insert_p, insert_parent, leftmost, true);
>> +
>> +	write_unlock(&et->lock);
>> +}
>> +
>> +
>>   unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>>   {
>>   	struct extent_tree *et, *next;
>> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>   	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>>   }
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>> +					struct extent_info_unaligned *eiu)
>> +{
>> +	if (!f2fs_may_extent_tree(inode))
>> +		return false;
>> +
>> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
>> +}
>> +#endif
>> +
>>   void f2fs_update_extent_cache(struct dnode_of_data *dn)
>>   {
>>   	pgoff_t fofs;
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index 0fe239dd50f4..3a02642a26d4 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -578,11 +578,21 @@ struct extent_info {
>>   	u32 blk;			/* start block address of the extent */
>>   };
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +struct extent_info_unaligned {
>> +	struct extent_info ei;		/* extent info */
>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>> +};
>> +#endif
>> +
>>   struct extent_node {
>>   	struct rb_node rb_node;		/* rb node located in rb-tree */
>>   	struct extent_info ei;		/* extent info */
>>   	struct list_head list;		/* node in global extent list of sbi */
>>   	struct extent_tree *et;		/* extent tree pointer */
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>> +#endif
>>   };
>>   
>>   struct extent_tree {
>> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>>   }
>>   
>>   static inline bool __is_extent_mergeable(struct extent_info *back,
>> -						struct extent_info *front)
>> +				struct extent_info *front, bool unaligned)
>>   {
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
>> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
>> +
>> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
>> +		return false;
>> +#endif
>>   	return (back->fofs + back->len == front->fofs &&
>>   			back->blk + back->len == front->blk);
>>   }
>>   
>>   static inline bool __is_back_mergeable(struct extent_info *cur,
>> -						struct extent_info *back)
>> +				struct extent_info *back, bool unaligned)
>>   {
>> -	return __is_extent_mergeable(back, cur);
>> +	return __is_extent_mergeable(back, cur, unaligned);
>>   }
>>   
>>   static inline bool __is_front_mergeable(struct extent_info *cur,
>> -						struct extent_info *front)
>> +				struct extent_info *front, bool unaligned)
>>   {
>> -	return __is_extent_mergeable(cur, front);
>> +	return __is_extent_mergeable(cur, front, unaligned);
>>   }
>>   
>>   extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
>> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>>   		bool force, bool *leftmost);
>>   bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>>   				struct rb_root_cached *root, bool check_key);
>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>> +				unsigned int plen);
>>   unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>>   void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>>   void f2fs_drop_extent_tree(struct inode *inode);
>> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>>   void f2fs_destroy_extent_tree(struct inode *inode);
>>   bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>   			struct extent_info *ei);
>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>> +					struct extent_info_unaligned *eiu);
>>   void f2fs_update_extent_cache(struct dnode_of_data *dn);
>>   void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>>   			pgoff_t fofs, block_t blkaddr, unsigned int len);
>> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>>   void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>>   void f2fs_put_page_dic(struct page *page);
>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>>   int f2fs_init_compress_ctx(struct compress_ctx *cc);
>>   void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>>   void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>> index dd611efa8aa4..7be2b01caa2a 100644
>> --- a/fs/f2fs/node.c
>> +++ b/fs/f2fs/node.c
>> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>>   	dn->ofs_in_node = offset[level];
>>   	dn->node_page = npage[level];
>>   	dn->data_blkaddr = f2fs_data_blkaddr(dn);
>> +
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
>> +			!f2fs_sb_has_readonly(sbi)) {
>> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
>> +
>> +		if (blknum) {
>> +			block_t blkaddr = f2fs_data_blkaddr(dn);
>> +
>> +			if (blkaddr == COMPRESS_ADDR)
>> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +							dn->ofs_in_node + 1);
>> +
>> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
>> +					index, blkaddr,
>> +					F2FS_I(dn->inode)->i_cluster_size,
>> +					blknum);
>> +		}
>> +	}
>> +#endif
>>   	return 0;
>>   
>>   release_pages:
>> -- 
>> 2.22.1

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-20  0:39     ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-20  0:39 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/7/20 2:36, Jaegeuk Kim wrote:
> On 07/07, Chao Yu wrote:
>> Compressed inode may suffer read performance issue due to it can not
>> use extent cache, so I propose to add this unaligned extent support
>> to improve it.
>>
>> Currently, it only works in readonly format f2fs image.
>>
>> Unaligned extent: in one compressed cluster, physical block number
>> will be less than logical block number, so we add an extra physical
>> block length in extent info in order to indicate such extent status.
>>
>> The idea is if one whole cluster blocks are contiguous physically,
>> once its mapping info was readed at first time, we will cache an
>> unaligned (or aligned) extent info entry in extent cache, it expects
>> that the mapping info will be hitted when rereading cluster.
> 
> How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
> like RO case?

IIUC, update_largest_extent() in sload tries to find the largest non-compressed cluster
and persist its extent into the inode. For a compressed file, a non-compressed cluster is
not a common case, so I guess it's valuable to handle compressed cluster mapping
in the extent cache to enhance read performance, especially for an inode which has a
large cluster size.

Let me know if I misunderstood what you mean.

Thanks,

> 
>>
>> Merge policy:
>> - Aligned extents can be merged.
>> - Aligned extent and unaligned extent can not be merged.
>>
>> Signed-off-by: Chao Yu <chao@kernel.org>
>> ---
>>
>> I just post this for comments, it passes compiling, w/o any test.
>>
>>   fs/f2fs/compress.c     | 25 ++++++++++++
>>   fs/f2fs/data.c         | 38 +++++++++++++-----
>>   fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>>   fs/f2fs/f2fs.h         | 33 +++++++++++++---
>>   fs/f2fs/node.c         | 20 ++++++++++
>>   5 files changed, 181 insertions(+), 25 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 455561826c7d..f072ac33eba5 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>>   	f2fs_put_dic(dic);
>>   }
>>   
>> +/*
>> + * check whether cluster blocks are contiguous, and add extent cache entry
>> + * only if cluster blocks are logically and physically contiguous.
>> + */
>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
>> +{
>> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
>> +	int i = compressed ? 1 : 0;
>> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +						dn->ofs_in_node + i);
>> +
>> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
>> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +						dn->ofs_in_node + i);
>> +
>> +		if (!__is_valid_data_blkaddr(blkaddr))
>> +			break;
>> +		if (first_blkaddr + i - 1 != blkaddr)
>> +			return 0;
>> +	}
>> +
>> +	return compressed ? i - 1 : i;
>> +}
>> +
>> +
>>   const struct address_space_operations f2fs_compress_aops = {
>>   	.releasepage = f2fs_release_page,
>>   	.invalidatepage = f2fs_invalidate_page,
>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>> index d2cf48c5a2e4..9572d78da4d7 100644
>> --- a/fs/f2fs/data.c
>> +++ b/fs/f2fs/data.c
>> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   	sector_t last_block_in_file;
>>   	const unsigned blocksize = blks_to_bytes(inode, 1);
>>   	struct decompress_io_ctx *dic = NULL;
>> +	struct extent_info_unaligned eiu;
>> +	bool extent_cache = false;
>>   	int i;
>>   	int ret = 0;
>>   
>> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   	if (f2fs_cluster_is_empty(cc))
>>   		goto out;
>>   
>> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
>> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>> -	if (ret)
>> -		goto out;
>> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
>> +		extent_cache = true;
>>   
>> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>> +	if (!extent_cache) {
>> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
>> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>> +		if (ret)
>> +			goto out;
>> +
>> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>> +	}
>>   
>>   	for (i = 1; i < cc->cluster_size; i++) {
>>   		block_t blkaddr;
>>   
>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> -						dn.ofs_in_node + i);
>> +		if (extent_cache)
>> +			blkaddr = eiu.ei.blk + i;
>> +		else
>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> +							dn.ofs_in_node + i);
>>   
>>   		if (!__is_valid_data_blkaddr(blkaddr))
>>   			break;
>> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   			goto out_put_dnode;
>>   		}
>>   		cc->nr_cpages++;
>> +
>> +		if (extent_cache && i >= eiu.plen)
>> +			break;
>>   	}
>>   
>>   	/* nothing to decompress */
>> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   		block_t blkaddr;
>>   		struct bio_post_read_ctx *ctx;
>>   
>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>> +		if (extent_cache)
>> +			blkaddr = eiu.plen + i + 1;
>> +		else
>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>   						dn.ofs_in_node + i + 1);
>>   
>>   		f2fs_wait_on_block_writeback(inode, blkaddr);
>> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   		*last_block_in_bio = blkaddr;
>>   	}
>>   
>> -	f2fs_put_dnode(&dn);
>> +	if (!extent_cache)
>> +		f2fs_put_dnode(&dn);
>>   
>>   	*bio_ret = bio;
>>   	return 0;
>>   
>>   out_put_dnode:
>> -	f2fs_put_dnode(&dn);
>> +	if (!extent_cache)
>> +		f2fs_put_dnode(&dn);
>>   out:
>>   	for (i = 0; i < cc->cluster_size; i++) {
>>   		if (cc->rpages[i]) {
>> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
>> index 3ebf976a682d..db9de95f90dc 100644
>> --- a/fs/f2fs/extent_cache.c
>> +++ b/fs/f2fs/extent_cache.c
>> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>>   static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct rb_node *parent, struct rb_node **p,
>> -				bool leftmost)
>> +				bool leftmost, bool unaligned)
>>   {
>>   	struct extent_node *en;
>>   
>> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>   	INIT_LIST_HEAD(&en->list);
>>   	en->et = et;
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (unaligned)
>> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
>> +#endif
>> +
>>   	rb_link_node(&en->rb_node, parent, p);
>>   	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>>   	atomic_inc(&et->node_cnt);
>> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>>   	struct rb_node **p = &et->root.rb_root.rb_node;
>>   	struct extent_node *en;
>>   
>> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
>> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>>   	if (!en)
>>   		return NULL;
>>   
>> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>   		stat_inc_rbtree_node_hit(sbi);
>>   
>>   	*ei = en->ei;
>> +
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
>> +				!f2fs_sb_has_readonly(sbi)) {
>> +		struct extent_info_unaligned *eiu =
>> +				(struct extent_info_unaligned *)ei;
>> +
>> +		eiu->plen = en->plen;
>> +	}
>> +#endif
>> +
>>   	spin_lock(&sbi->extent_lock);
>>   	if (!list_empty(&en->list)) {
>>   		list_move_tail(&en->list, &sbi->extent_list);
>> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>   static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct extent_node *prev_ex,
>> -				struct extent_node *next_ex)
>> +				struct extent_node *next_ex,
>> +				bool unaligned)
>>   {
>>   	struct extent_node *en = NULL;
>>   
>> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
>> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>>   		prev_ex->ei.len += ei->len;
>>   		ei = &prev_ex->ei;
>>   		en = prev_ex;
>>   	}
>>   
>> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
>> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>>   		next_ex->ei.fofs = ei->fofs;
>>   		next_ex->ei.blk = ei->blk;
>>   		next_ex->ei.len += ei->len;
>> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>   				struct extent_tree *et, struct extent_info *ei,
>>   				struct rb_node **insert_p,
>>   				struct rb_node *insert_parent,
>> -				bool leftmost)
>> +				bool leftmost, bool unaligned)
>>   {
>>   	struct rb_node **p;
>>   	struct rb_node *parent = NULL;
>> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>   	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>>   						ei->fofs, &leftmost);
>>   do_insert:
>> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
>> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>>   	if (!en)
>>   		return NULL;
>>   
>> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   						end - dei.fofs + dei.blk,
>>   						org_end - end);
>>   				en1 = __insert_extent_tree(sbi, et, &ei,
>> -							NULL, NULL, true);
>> +						NULL, NULL, true, false);
>>   				next_en = en1;
>>   			} else {
>>   				en->ei.fofs = end;
>> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   	if (blkaddr) {
>>   
>>   		set_extent_info(&ei, fofs, blkaddr, len);
>> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
>> +		if (!__try_merge_extent_node(sbi, et, &ei,
>> +					prev_en, next_en, false))
>>   			__insert_extent_tree(sbi, et, &ei,
>> -					insert_p, insert_parent, leftmost);
>> +				insert_p, insert_parent, leftmost, false);
>>   
>>   		/* give up extent_cache, if split and small updates happen */
>>   		if (dei.len >= 1 &&
>> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>   		f2fs_mark_inode_dirty_sync(inode, true);
>>   }
>>   
>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>> +				unsigned int plen)
>> +{
>> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
>> +	struct extent_node *en = NULL;
>> +	struct extent_node *prev_en = NULL, *next_en = NULL;
>> +	struct extent_info_unaligned eiu;
>> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
>> +	bool leftmost = false;
>> +
>> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
>> +
>> +	write_lock(&et->lock);
>> +
>> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
>> +		write_unlock(&et->lock);
>> +		return;
>> +	}
>> +
>> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
>> +				(struct rb_entry *)et->cached_en, fofs,
>> +				(struct rb_entry **)&prev_en,
>> +				(struct rb_entry **)&next_en,
>> +				&insert_p, &insert_parent, false,
>> +				&leftmost);
>> +	f2fs_bug_on(sbi, en);
>> +
>> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
>> +	eiu.plen = plen;
>> +
>> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
>> +				prev_en, next_en, true))
>> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
>> +				insert_p, insert_parent, leftmost, true);
>> +
>> +	write_unlock(&et->lock);
>> +}
>> +
>> +
>>   unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>>   {
>>   	struct extent_tree *et, *next;
>> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>   	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>>   }
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>> +					struct extent_info_unaligned *eiu)
>> +{
>> +	if (!f2fs_may_extent_tree(inode))
>> +		return false;
>> +
>> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
>> +}
>> +#endif
>> +
>>   void f2fs_update_extent_cache(struct dnode_of_data *dn)
>>   {
>>   	pgoff_t fofs;
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index 0fe239dd50f4..3a02642a26d4 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -578,11 +578,21 @@ struct extent_info {
>>   	u32 blk;			/* start block address of the extent */
>>   };
>>   
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +struct extent_info_unaligned {
>> +	struct extent_info ei;		/* extent info */
>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>> +};
>> +#endif
>> +
>>   struct extent_node {
>>   	struct rb_node rb_node;		/* rb node located in rb-tree */
>>   	struct extent_info ei;		/* extent info */
>>   	struct list_head list;		/* node in global extent list of sbi */
>>   	struct extent_tree *et;		/* extent tree pointer */
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>> +#endif
>>   };
>>   
>>   struct extent_tree {
>> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>>   }
>>   
>>   static inline bool __is_extent_mergeable(struct extent_info *back,
>> -						struct extent_info *front)
>> +				struct extent_info *front, bool unaligned)
>>   {
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
>> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
>> +
>> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
>> +		return false;
>> +#endif
>>   	return (back->fofs + back->len == front->fofs &&
>>   			back->blk + back->len == front->blk);
>>   }
>>   
>>   static inline bool __is_back_mergeable(struct extent_info *cur,
>> -						struct extent_info *back)
>> +				struct extent_info *back, bool unaligned)
>>   {
>> -	return __is_extent_mergeable(back, cur);
>> +	return __is_extent_mergeable(back, cur, unaligned);
>>   }
>>   
>>   static inline bool __is_front_mergeable(struct extent_info *cur,
>> -						struct extent_info *front)
>> +				struct extent_info *front, bool unaligned)
>>   {
>> -	return __is_extent_mergeable(cur, front);
>> +	return __is_extent_mergeable(cur, front, unaligned);
>>   }
>>   
>>   extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
>> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>>   		bool force, bool *leftmost);
>>   bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>>   				struct rb_root_cached *root, bool check_key);
>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>> +				unsigned int plen);
>>   unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>>   void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>>   void f2fs_drop_extent_tree(struct inode *inode);
>> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>>   void f2fs_destroy_extent_tree(struct inode *inode);
>>   bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>   			struct extent_info *ei);
>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>> +					struct extent_info_unaligned *eiu);
>>   void f2fs_update_extent_cache(struct dnode_of_data *dn);
>>   void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>>   			pgoff_t fofs, block_t blkaddr, unsigned int len);
>> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>   struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>>   void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>>   void f2fs_put_page_dic(struct page *page);
>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>>   int f2fs_init_compress_ctx(struct compress_ctx *cc);
>>   void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>>   void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>> index dd611efa8aa4..7be2b01caa2a 100644
>> --- a/fs/f2fs/node.c
>> +++ b/fs/f2fs/node.c
>> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>>   	dn->ofs_in_node = offset[level];
>>   	dn->node_page = npage[level];
>>   	dn->data_blkaddr = f2fs_data_blkaddr(dn);
>> +
>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
>> +			!f2fs_sb_has_readonly(sbi)) {
>> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
>> +
>> +		if (blknum) {
>> +			block_t blkaddr = f2fs_data_blkaddr(dn);
>> +
>> +			if (blkaddr == COMPRESS_ADDR)
>> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
>> +							dn->ofs_in_node + 1);
>> +
>> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
>> +					index, blkaddr,
>> +					F2FS_I(dn->inode)->i_cluster_size,
>> +					blknum);
>> +		}
>> +	}
>> +#endif
>>   	return 0;
>>   
>>   release_pages:
>> -- 
>> 2.22.1


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-20  0:39     ` [f2fs-dev] " Chao Yu
@ 2021-07-29  1:41       ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-29  1:41 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

Ping,

On 2021/7/20 8:39, Chao Yu wrote:
> On 2021/7/20 2:36, Jaegeuk Kim wrote:
>> On 07/07, Chao Yu wrote:
>>> Compressed inode may suffer read performance issue due to it can not
>>> use extent cache, so I propose to add this unaligned extent support
>>> to improve it.
>>>
>>> Currently, it only works in readonly format f2fs image.
>>>
>>> Unaligned extent: in one compressed cluster, physical block number
>>> will be less than logical block number, so we add an extra physical
>>> block length in extent info in order to indicate such extent status.
>>>
>>> The idea is if one whole cluster blocks are contiguous physically,
>>> once its mapping info was readed at first time, we will cache an
>>> unaligned (or aligned) extent info entry in extent cache, it expects
>>> that the mapping info will be hitted when rereading cluster.
>>
>> How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
>> like RO case?
> 
> IIUC, update_largest_extent() in sload tries to find largest non-compressed cluster,
> and persist its extent into inode, for a compressed file, non-compressed cluster is
> not a common case, so I guess it's valuable to handle compressed cluster mapping
> in extent cache to enhance read performance, especially for inode which has large
> sized cluster.
> 
> Let me if I misunderstood what you mean.
> 
> Thanks,
> 
>>
>>>
>>> Merge policy:
>>> - Aligned extents can be merged.
>>> - Aligned extent and unaligned extent can not be merged.
>>>
>>> Signed-off-by: Chao Yu <chao@kernel.org>
>>> ---
>>>
>>> I just post this for comments, it passes compiling, w/o any test.
>>>
>>>    fs/f2fs/compress.c     | 25 ++++++++++++
>>>    fs/f2fs/data.c         | 38 +++++++++++++-----
>>>    fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>>>    fs/f2fs/f2fs.h         | 33 +++++++++++++---
>>>    fs/f2fs/node.c         | 20 ++++++++++
>>>    5 files changed, 181 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>> index 455561826c7d..f072ac33eba5 100644
>>> --- a/fs/f2fs/compress.c
>>> +++ b/fs/f2fs/compress.c
>>> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>>>    	f2fs_put_dic(dic);
>>>    }
>>>    
>>> +/*
>>> + * check whether cluster blocks are contiguous, and add extent cache entry
>>> + * only if cluster blocks are logically and physically contiguous.
>>> + */
>>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
>>> +{
>>> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
>>> +	int i = compressed ? 1 : 0;
>>> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +						dn->ofs_in_node + i);
>>> +
>>> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
>>> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +						dn->ofs_in_node + i);
>>> +
>>> +		if (!__is_valid_data_blkaddr(blkaddr))
>>> +			break;
>>> +		if (first_blkaddr + i - 1 != blkaddr)
>>> +			return 0;
>>> +	}
>>> +
>>> +	return compressed ? i - 1 : i;
>>> +}
>>> +
>>> +
>>>    const struct address_space_operations f2fs_compress_aops = {
>>>    	.releasepage = f2fs_release_page,
>>>    	.invalidatepage = f2fs_invalidate_page,
>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>> index d2cf48c5a2e4..9572d78da4d7 100644
>>> --- a/fs/f2fs/data.c
>>> +++ b/fs/f2fs/data.c
>>> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    	sector_t last_block_in_file;
>>>    	const unsigned blocksize = blks_to_bytes(inode, 1);
>>>    	struct decompress_io_ctx *dic = NULL;
>>> +	struct extent_info_unaligned eiu;
>>> +	bool extent_cache = false;
>>>    	int i;
>>>    	int ret = 0;
>>>    
>>> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    	if (f2fs_cluster_is_empty(cc))
>>>    		goto out;
>>>    
>>> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
>>> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>>> -	if (ret)
>>> -		goto out;
>>> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
>>> +		extent_cache = true;
>>>    
>>> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>>> +	if (!extent_cache) {
>>> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
>>> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>>> +		if (ret)
>>> +			goto out;
>>> +
>>> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>>> +	}
>>>    
>>>    	for (i = 1; i < cc->cluster_size; i++) {
>>>    		block_t blkaddr;
>>>    
>>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> -						dn.ofs_in_node + i);
>>> +		if (extent_cache)
>>> +			blkaddr = eiu.ei.blk + i;
>>> +		else
>>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> +							dn.ofs_in_node + i);
>>>    
>>>    		if (!__is_valid_data_blkaddr(blkaddr))
>>>    			break;
>>> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    			goto out_put_dnode;
>>>    		}
>>>    		cc->nr_cpages++;
>>> +
>>> +		if (extent_cache && i >= eiu.plen)
>>> +			break;
>>>    	}
>>>    
>>>    	/* nothing to decompress */
>>> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    		block_t blkaddr;
>>>    		struct bio_post_read_ctx *ctx;
>>>    
>>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> +		if (extent_cache)
>>> +			blkaddr = eiu.plen + i + 1;
>>> +		else
>>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>>    						dn.ofs_in_node + i + 1);
>>>    
>>>    		f2fs_wait_on_block_writeback(inode, blkaddr);
>>> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    		*last_block_in_bio = blkaddr;
>>>    	}
>>>    
>>> -	f2fs_put_dnode(&dn);
>>> +	if (!extent_cache)
>>> +		f2fs_put_dnode(&dn);
>>>    
>>>    	*bio_ret = bio;
>>>    	return 0;
>>>    
>>>    out_put_dnode:
>>> -	f2fs_put_dnode(&dn);
>>> +	if (!extent_cache)
>>> +		f2fs_put_dnode(&dn);
>>>    out:
>>>    	for (i = 0; i < cc->cluster_size; i++) {
>>>    		if (cc->rpages[i]) {
>>> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
>>> index 3ebf976a682d..db9de95f90dc 100644
>>> --- a/fs/f2fs/extent_cache.c
>>> +++ b/fs/f2fs/extent_cache.c
>>> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>>>    static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct rb_node *parent, struct rb_node **p,
>>> -				bool leftmost)
>>> +				bool leftmost, bool unaligned)
>>>    {
>>>    	struct extent_node *en;
>>>    
>>> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>>    	INIT_LIST_HEAD(&en->list);
>>>    	en->et = et;
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (unaligned)
>>> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
>>> +#endif
>>> +
>>>    	rb_link_node(&en->rb_node, parent, p);
>>>    	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>>>    	atomic_inc(&et->node_cnt);
>>> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>>>    	struct rb_node **p = &et->root.rb_root.rb_node;
>>>    	struct extent_node *en;
>>>    
>>> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
>>> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>>>    	if (!en)
>>>    		return NULL;
>>>    
>>> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>>    		stat_inc_rbtree_node_hit(sbi);
>>>    
>>>    	*ei = en->ei;
>>> +
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
>>> +				!f2fs_sb_has_readonly(sbi)) {
>>> +		struct extent_info_unaligned *eiu =
>>> +				(struct extent_info_unaligned *)ei;
>>> +
>>> +		eiu->plen = en->plen;
>>> +	}
>>> +#endif
>>> +
>>>    	spin_lock(&sbi->extent_lock);
>>>    	if (!list_empty(&en->list)) {
>>>    		list_move_tail(&en->list, &sbi->extent_list);
>>> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>>    static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct extent_node *prev_ex,
>>> -				struct extent_node *next_ex)
>>> +				struct extent_node *next_ex,
>>> +				bool unaligned)
>>>    {
>>>    	struct extent_node *en = NULL;
>>>    
>>> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
>>> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>>>    		prev_ex->ei.len += ei->len;
>>>    		ei = &prev_ex->ei;
>>>    		en = prev_ex;
>>>    	}
>>>    
>>> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
>>> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>>>    		next_ex->ei.fofs = ei->fofs;
>>>    		next_ex->ei.blk = ei->blk;
>>>    		next_ex->ei.len += ei->len;
>>> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct rb_node **insert_p,
>>>    				struct rb_node *insert_parent,
>>> -				bool leftmost)
>>> +				bool leftmost, bool unaligned)
>>>    {
>>>    	struct rb_node **p;
>>>    	struct rb_node *parent = NULL;
>>> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>>    	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>>>    						ei->fofs, &leftmost);
>>>    do_insert:
>>> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
>>> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>>>    	if (!en)
>>>    		return NULL;
>>>    
>>> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    						end - dei.fofs + dei.blk,
>>>    						org_end - end);
>>>    				en1 = __insert_extent_tree(sbi, et, &ei,
>>> -							NULL, NULL, true);
>>> +						NULL, NULL, true, false);
>>>    				next_en = en1;
>>>    			} else {
>>>    				en->ei.fofs = end;
>>> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    	if (blkaddr) {
>>>    
>>>    		set_extent_info(&ei, fofs, blkaddr, len);
>>> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
>>> +		if (!__try_merge_extent_node(sbi, et, &ei,
>>> +					prev_en, next_en, false))
>>>    			__insert_extent_tree(sbi, et, &ei,
>>> -					insert_p, insert_parent, leftmost);
>>> +				insert_p, insert_parent, leftmost, false);
>>>    
>>>    		/* give up extent_cache, if split and small updates happen */
>>>    		if (dei.len >= 1 &&
>>> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    		f2fs_mark_inode_dirty_sync(inode, true);
>>>    }
>>>    
>>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>>> +				unsigned int plen)
>>> +{
>>> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>>> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
>>> +	struct extent_node *en = NULL;
>>> +	struct extent_node *prev_en = NULL, *next_en = NULL;
>>> +	struct extent_info_unaligned eiu;
>>> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
>>> +	bool leftmost = false;
>>> +
>>> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
>>> +
>>> +	write_lock(&et->lock);
>>> +
>>> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
>>> +		write_unlock(&et->lock);
>>> +		return;
>>> +	}
>>> +
>>> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
>>> +				(struct rb_entry *)et->cached_en, fofs,
>>> +				(struct rb_entry **)&prev_en,
>>> +				(struct rb_entry **)&next_en,
>>> +				&insert_p, &insert_parent, false,
>>> +				&leftmost);
>>> +	f2fs_bug_on(sbi, en);
>>> +
>>> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
>>> +	eiu.plen = plen;
>>> +
>>> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
>>> +				prev_en, next_en, true))
>>> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
>>> +				insert_p, insert_parent, leftmost, true);
>>> +
>>> +	write_unlock(&et->lock);
>>> +}
>>> +
>>> +
>>>    unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>>>    {
>>>    	struct extent_tree *et, *next;
>>> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>>    	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>>>    }
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>>> +					struct extent_info_unaligned *eiu)
>>> +{
>>> +	if (!f2fs_may_extent_tree(inode))
>>> +		return false;
>>> +
>>> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
>>> +}
>>> +#endif
>>> +
>>>    void f2fs_update_extent_cache(struct dnode_of_data *dn)
>>>    {
>>>    	pgoff_t fofs;
>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>> index 0fe239dd50f4..3a02642a26d4 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -578,11 +578,21 @@ struct extent_info {
>>>    	u32 blk;			/* start block address of the extent */
>>>    };
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +struct extent_info_unaligned {
>>> +	struct extent_info ei;		/* extent info */
>>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>>> +};
>>> +#endif
>>> +
>>>    struct extent_node {
>>>    	struct rb_node rb_node;		/* rb node located in rb-tree */
>>>    	struct extent_info ei;		/* extent info */
>>>    	struct list_head list;		/* node in global extent list of sbi */
>>>    	struct extent_tree *et;		/* extent tree pointer */
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>>> +#endif
>>>    };
>>>    
>>>    struct extent_tree {
>>> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>>>    }
>>>    
>>>    static inline bool __is_extent_mergeable(struct extent_info *back,
>>> -						struct extent_info *front)
>>> +				struct extent_info *front, bool unaligned)
>>>    {
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
>>> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
>>> +
>>> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
>>> +		return false;
>>> +#endif
>>>    	return (back->fofs + back->len == front->fofs &&
>>>    			back->blk + back->len == front->blk);
>>>    }
>>>    
>>>    static inline bool __is_back_mergeable(struct extent_info *cur,
>>> -						struct extent_info *back)
>>> +				struct extent_info *back, bool unaligned)
>>>    {
>>> -	return __is_extent_mergeable(back, cur);
>>> +	return __is_extent_mergeable(back, cur, unaligned);
>>>    }
>>>    
>>>    static inline bool __is_front_mergeable(struct extent_info *cur,
>>> -						struct extent_info *front)
>>> +				struct extent_info *front, bool unaligned)
>>>    {
>>> -	return __is_extent_mergeable(cur, front);
>>> +	return __is_extent_mergeable(cur, front, unaligned);
>>>    }
>>>    
>>>    extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
>>> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>>>    		bool force, bool *leftmost);
>>>    bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>>>    				struct rb_root_cached *root, bool check_key);
>>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>>> +				unsigned int plen);
>>>    unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>>>    void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>>>    void f2fs_drop_extent_tree(struct inode *inode);
>>> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>>>    void f2fs_destroy_extent_tree(struct inode *inode);
>>>    bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>>    			struct extent_info *ei);
>>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>>> +					struct extent_info_unaligned *eiu);
>>>    void f2fs_update_extent_cache(struct dnode_of_data *dn);
>>>    void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>>>    			pgoff_t fofs, block_t blkaddr, unsigned int len);
>>> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>>>    void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>>>    void f2fs_put_page_dic(struct page *page);
>>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>>>    int f2fs_init_compress_ctx(struct compress_ctx *cc);
>>>    void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>>>    void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
>>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>>> index dd611efa8aa4..7be2b01caa2a 100644
>>> --- a/fs/f2fs/node.c
>>> +++ b/fs/f2fs/node.c
>>> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>>>    	dn->ofs_in_node = offset[level];
>>>    	dn->node_page = npage[level];
>>>    	dn->data_blkaddr = f2fs_data_blkaddr(dn);
>>> +
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
>>> +			!f2fs_sb_has_readonly(sbi)) {
>>> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
>>> +
>>> +		if (blknum) {
>>> +			block_t blkaddr = f2fs_data_blkaddr(dn);
>>> +
>>> +			if (blkaddr == COMPRESS_ADDR)
>>> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +							dn->ofs_in_node + 1);
>>> +
>>> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
>>> +					index, blkaddr,
>>> +					F2FS_I(dn->inode)->i_cluster_size,
>>> +					blknum);
>>> +		}
>>> +	}
>>> +#endif
>>>    	return 0;
>>>    
>>>    release_pages:
>>> -- 
>>> 2.22.1
> 
> 
> _______________________________________________
> Linux-f2fs-devel mailing list
> Linux-f2fs-devel@lists.sourceforge.net
> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
> 

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-29  1:41       ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-07-29  1:41 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

Ping,

On 2021/7/20 8:39, Chao Yu wrote:
> On 2021/7/20 2:36, Jaegeuk Kim wrote:
>> On 07/07, Chao Yu wrote:
>>> Compressed inode may suffer read performance issue because it cannot
>>> use extent cache, so I propose to add this unaligned extent support
>>> to improve it.
>>>
>>> Currently, it only works in readonly format f2fs image.
>>>
>>> Unaligned extent: in one compressed cluster, physical block number
>>> will be less than logical block number, so we add an extra physical
>>> block length in extent info in order to indicate such extent status.
>>>
>>> The idea is if one whole cluster blocks are contiguous physically,
>>> once its mapping info was read for the first time, we will cache an
>>> unaligned (or aligned) extent info entry in extent cache, it expects
>>> that the mapping info will be hit when rereading the cluster.
>>
>> How about just modifying to handle COMPRESS_ADDR when modifying the extent_cache
>> like RO case?
> 
> IIUC, update_largest_extent() in sload tries to find largest non-compressed cluster,
> and persist its extent into inode, for a compressed file, non-compressed cluster is
> not a common case, so I guess it's valuable to handle compressed cluster mapping
> in extent cache to enhance read performance, especially for inode which has large
> sized cluster.
> 
> Let me know if I misunderstood what you mean.
> 
> Thanks,
> 
>>
>>>
>>> Merge policy:
>>> - Aligned extents can be merged.
>>> - Aligned extent and unaligned extent can not be merged.
>>>
>>> Signed-off-by: Chao Yu <chao@kernel.org>
>>> ---
>>>
>>> I just post this for comments, it passes compiling, w/o any test.
>>>
>>>    fs/f2fs/compress.c     | 25 ++++++++++++
>>>    fs/f2fs/data.c         | 38 +++++++++++++-----
>>>    fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>>>    fs/f2fs/f2fs.h         | 33 +++++++++++++---
>>>    fs/f2fs/node.c         | 20 ++++++++++
>>>    5 files changed, 181 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>> index 455561826c7d..f072ac33eba5 100644
>>> --- a/fs/f2fs/compress.c
>>> +++ b/fs/f2fs/compress.c
>>> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>>>    	f2fs_put_dic(dic);
>>>    }
>>>    
>>> +/*
>>> + * check whether cluster blocks are contiguous, and add extent cache entry
>>> + * only if cluster blocks are logically and physically contiguous.
>>> + */
>>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
>>> +{
>>> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
>>> +	int i = compressed ? 1 : 0;
>>> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +						dn->ofs_in_node + i);
>>> +
>>> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
>>> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +						dn->ofs_in_node + i);
>>> +
>>> +		if (!__is_valid_data_blkaddr(blkaddr))
>>> +			break;
>>> +		if (first_blkaddr + i - 1 != blkaddr)
>>> +			return 0;
>>> +	}
>>> +
>>> +	return compressed ? i - 1 : i;
>>> +}
>>> +
>>> +
>>>    const struct address_space_operations f2fs_compress_aops = {
>>>    	.releasepage = f2fs_release_page,
>>>    	.invalidatepage = f2fs_invalidate_page,
>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>> index d2cf48c5a2e4..9572d78da4d7 100644
>>> --- a/fs/f2fs/data.c
>>> +++ b/fs/f2fs/data.c
>>> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    	sector_t last_block_in_file;
>>>    	const unsigned blocksize = blks_to_bytes(inode, 1);
>>>    	struct decompress_io_ctx *dic = NULL;
>>> +	struct extent_info_unaligned eiu;
>>> +	bool extent_cache = false;
>>>    	int i;
>>>    	int ret = 0;
>>>    
>>> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    	if (f2fs_cluster_is_empty(cc))
>>>    		goto out;
>>>    
>>> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
>>> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>>> -	if (ret)
>>> -		goto out;
>>> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
>>> +		extent_cache = true;
>>>    
>>> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>>> +	if (!extent_cache) {
>>> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
>>> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
>>> +		if (ret)
>>> +			goto out;
>>> +
>>> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
>>> +	}
>>>    
>>>    	for (i = 1; i < cc->cluster_size; i++) {
>>>    		block_t blkaddr;
>>>    
>>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> -						dn.ofs_in_node + i);
>>> +		if (extent_cache)
>>> +			blkaddr = eiu.ei.blk + i;
>>> +		else
>>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> +							dn.ofs_in_node + i);
>>>    
>>>    		if (!__is_valid_data_blkaddr(blkaddr))
>>>    			break;
>>> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    			goto out_put_dnode;
>>>    		}
>>>    		cc->nr_cpages++;
>>> +
>>> +		if (extent_cache && i >= eiu.plen)
>>> +			break;
>>>    	}
>>>    
>>>    	/* nothing to decompress */
>>> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    		block_t blkaddr;
>>>    		struct bio_post_read_ctx *ctx;
>>>    
>>> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>> +		if (extent_cache)
>>> +			blkaddr = eiu.plen + i + 1;
>>> +		else
>>> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>>>    						dn.ofs_in_node + i + 1);
>>>    
>>>    		f2fs_wait_on_block_writeback(inode, blkaddr);
>>> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    		*last_block_in_bio = blkaddr;
>>>    	}
>>>    
>>> -	f2fs_put_dnode(&dn);
>>> +	if (!extent_cache)
>>> +		f2fs_put_dnode(&dn);
>>>    
>>>    	*bio_ret = bio;
>>>    	return 0;
>>>    
>>>    out_put_dnode:
>>> -	f2fs_put_dnode(&dn);
>>> +	if (!extent_cache)
>>> +		f2fs_put_dnode(&dn);
>>>    out:
>>>    	for (i = 0; i < cc->cluster_size; i++) {
>>>    		if (cc->rpages[i]) {
>>> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
>>> index 3ebf976a682d..db9de95f90dc 100644
>>> --- a/fs/f2fs/extent_cache.c
>>> +++ b/fs/f2fs/extent_cache.c
>>> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>>>    static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct rb_node *parent, struct rb_node **p,
>>> -				bool leftmost)
>>> +				bool leftmost, bool unaligned)
>>>    {
>>>    	struct extent_node *en;
>>>    
>>> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>>>    	INIT_LIST_HEAD(&en->list);
>>>    	en->et = et;
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (unaligned)
>>> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
>>> +#endif
>>> +
>>>    	rb_link_node(&en->rb_node, parent, p);
>>>    	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>>>    	atomic_inc(&et->node_cnt);
>>> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>>>    	struct rb_node **p = &et->root.rb_root.rb_node;
>>>    	struct extent_node *en;
>>>    
>>> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
>>> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>>>    	if (!en)
>>>    		return NULL;
>>>    
>>> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>>    		stat_inc_rbtree_node_hit(sbi);
>>>    
>>>    	*ei = en->ei;
>>> +
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
>>> +				!f2fs_sb_has_readonly(sbi)) {
>>> +		struct extent_info_unaligned *eiu =
>>> +				(struct extent_info_unaligned *)ei;
>>> +
>>> +		eiu->plen = en->plen;
>>> +	}
>>> +#endif
>>> +
>>>    	spin_lock(&sbi->extent_lock);
>>>    	if (!list_empty(&en->list)) {
>>>    		list_move_tail(&en->list, &sbi->extent_list);
>>> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>>>    static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct extent_node *prev_ex,
>>> -				struct extent_node *next_ex)
>>> +				struct extent_node *next_ex,
>>> +				bool unaligned)
>>>    {
>>>    	struct extent_node *en = NULL;
>>>    
>>> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
>>> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>>>    		prev_ex->ei.len += ei->len;
>>>    		ei = &prev_ex->ei;
>>>    		en = prev_ex;
>>>    	}
>>>    
>>> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
>>> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>>>    		next_ex->ei.fofs = ei->fofs;
>>>    		next_ex->ei.blk = ei->blk;
>>>    		next_ex->ei.len += ei->len;
>>> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>>    				struct extent_tree *et, struct extent_info *ei,
>>>    				struct rb_node **insert_p,
>>>    				struct rb_node *insert_parent,
>>> -				bool leftmost)
>>> +				bool leftmost, bool unaligned)
>>>    {
>>>    	struct rb_node **p;
>>>    	struct rb_node *parent = NULL;
>>> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>>>    	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>>>    						ei->fofs, &leftmost);
>>>    do_insert:
>>> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
>>> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>>>    	if (!en)
>>>    		return NULL;
>>>    
>>> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    						end - dei.fofs + dei.blk,
>>>    						org_end - end);
>>>    				en1 = __insert_extent_tree(sbi, et, &ei,
>>> -							NULL, NULL, true);
>>> +						NULL, NULL, true, false);
>>>    				next_en = en1;
>>>    			} else {
>>>    				en->ei.fofs = end;
>>> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    	if (blkaddr) {
>>>    
>>>    		set_extent_info(&ei, fofs, blkaddr, len);
>>> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
>>> +		if (!__try_merge_extent_node(sbi, et, &ei,
>>> +					prev_en, next_en, false))
>>>    			__insert_extent_tree(sbi, et, &ei,
>>> -					insert_p, insert_parent, leftmost);
>>> +				insert_p, insert_parent, leftmost, false);
>>>    
>>>    		/* give up extent_cache, if split and small updates happen */
>>>    		if (dei.len >= 1 &&
>>> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>>>    		f2fs_mark_inode_dirty_sync(inode, true);
>>>    }
>>>    
>>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>>> +				unsigned int plen)
>>> +{
>>> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>>> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
>>> +	struct extent_node *en = NULL;
>>> +	struct extent_node *prev_en = NULL, *next_en = NULL;
>>> +	struct extent_info_unaligned eiu;
>>> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
>>> +	bool leftmost = false;
>>> +
>>> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
>>> +
>>> +	write_lock(&et->lock);
>>> +
>>> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
>>> +		write_unlock(&et->lock);
>>> +		return;
>>> +	}
>>> +
>>> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
>>> +				(struct rb_entry *)et->cached_en, fofs,
>>> +				(struct rb_entry **)&prev_en,
>>> +				(struct rb_entry **)&next_en,
>>> +				&insert_p, &insert_parent, false,
>>> +				&leftmost);
>>> +	f2fs_bug_on(sbi, en);
>>> +
>>> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
>>> +	eiu.plen = plen;
>>> +
>>> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
>>> +				prev_en, next_en, true))
>>> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
>>> +				insert_p, insert_parent, leftmost, true);
>>> +
>>> +	write_unlock(&et->lock);
>>> +}
>>> +
>>> +
>>>    unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>>>    {
>>>    	struct extent_tree *et, *next;
>>> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>>    	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>>>    }
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>>> +					struct extent_info_unaligned *eiu)
>>> +{
>>> +	if (!f2fs_may_extent_tree(inode))
>>> +		return false;
>>> +
>>> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
>>> +}
>>> +#endif
>>> +
>>>    void f2fs_update_extent_cache(struct dnode_of_data *dn)
>>>    {
>>>    	pgoff_t fofs;
>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>> index 0fe239dd50f4..3a02642a26d4 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -578,11 +578,21 @@ struct extent_info {
>>>    	u32 blk;			/* start block address of the extent */
>>>    };
>>>    
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +struct extent_info_unaligned {
>>> +	struct extent_info ei;		/* extent info */
>>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>>> +};
>>> +#endif
>>> +
>>>    struct extent_node {
>>>    	struct rb_node rb_node;		/* rb node located in rb-tree */
>>>    	struct extent_info ei;		/* extent info */
>>>    	struct list_head list;		/* node in global extent list of sbi */
>>>    	struct extent_tree *et;		/* extent tree pointer */
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	unsigned int plen;		/* physical extent length of compressed blocks */
>>> +#endif
>>>    };
>>>    
>>>    struct extent_tree {
>>> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>>>    }
>>>    
>>>    static inline bool __is_extent_mergeable(struct extent_info *back,
>>> -						struct extent_info *front)
>>> +				struct extent_info *front, bool unaligned)
>>>    {
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
>>> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
>>> +
>>> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
>>> +		return false;
>>> +#endif
>>>    	return (back->fofs + back->len == front->fofs &&
>>>    			back->blk + back->len == front->blk);
>>>    }
>>>    
>>>    static inline bool __is_back_mergeable(struct extent_info *cur,
>>> -						struct extent_info *back)
>>> +				struct extent_info *back, bool unaligned)
>>>    {
>>> -	return __is_extent_mergeable(back, cur);
>>> +	return __is_extent_mergeable(back, cur, unaligned);
>>>    }
>>>    
>>>    static inline bool __is_front_mergeable(struct extent_info *cur,
>>> -						struct extent_info *front)
>>> +				struct extent_info *front, bool unaligned)
>>>    {
>>> -	return __is_extent_mergeable(cur, front);
>>> +	return __is_extent_mergeable(cur, front, unaligned);
>>>    }
>>>    
>>>    extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
>>> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>>>    		bool force, bool *leftmost);
>>>    bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>>>    				struct rb_root_cached *root, bool check_key);
>>> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
>>> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
>>> +				unsigned int plen);
>>>    unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>>>    void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>>>    void f2fs_drop_extent_tree(struct inode *inode);
>>> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>>>    void f2fs_destroy_extent_tree(struct inode *inode);
>>>    bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>>>    			struct extent_info *ei);
>>> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
>>> +					struct extent_info_unaligned *eiu);
>>>    void f2fs_update_extent_cache(struct dnode_of_data *dn);
>>>    void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>>>    			pgoff_t fofs, block_t blkaddr, unsigned int len);
>>> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>    struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>>>    void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>>>    void f2fs_put_page_dic(struct page *page);
>>> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>>>    int f2fs_init_compress_ctx(struct compress_ctx *cc);
>>>    void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>>>    void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
>>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>>> index dd611efa8aa4..7be2b01caa2a 100644
>>> --- a/fs/f2fs/node.c
>>> +++ b/fs/f2fs/node.c
>>> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>>>    	dn->ofs_in_node = offset[level];
>>>    	dn->node_page = npage[level];
>>>    	dn->data_blkaddr = f2fs_data_blkaddr(dn);
>>> +
>>> +#ifdef CONFIG_F2FS_FS_COMPRESSION
>>> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
>>> +			!f2fs_sb_has_readonly(sbi)) {
>>> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
>>> +
>>> +		if (blknum) {
>>> +			block_t blkaddr = f2fs_data_blkaddr(dn);
>>> +
>>> +			if (blkaddr == COMPRESS_ADDR)
>>> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
>>> +							dn->ofs_in_node + 1);
>>> +
>>> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
>>> +					index, blkaddr,
>>> +					F2FS_I(dn->inode)->i_cluster_size,
>>> +					blknum);
>>> +		}
>>> +	}
>>> +#endif
>>>    	return 0;
>>>    
>>>    release_pages:
>>> -- 
>>> 2.22.1
> 
> 
> _______________________________________________
> Linux-f2fs-devel mailing list
> Linux-f2fs-devel@lists.sourceforge.net
> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
> 


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-07  1:58 ` [f2fs-dev] " Chao Yu
@ 2021-07-30 19:20   ` Jaegeuk Kim
  -1 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-07-30 19:20 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 07/07, Chao Yu wrote:
> Compressed inode may suffer read performance issue because it cannot
> use extent cache, so I propose to add this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is if one whole cluster blocks are contiguous physically,
> once its mapping info was read for the first time, we will cache an
> unaligned (or aligned) extent info entry in extent cache, it expects
> that the mapping info will be hit when rereading the cluster.
> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>  fs/f2fs/compress.c     | 25 ++++++++++++
>  fs/f2fs/data.c         | 38 +++++++++++++-----
>  fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>  fs/f2fs/f2fs.h         | 33 +++++++++++++---
>  fs/f2fs/node.c         | 20 ++++++++++
>  5 files changed, 181 insertions(+), 25 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..f072ac33eba5 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>  	f2fs_put_dic(dic);
>  }
>  
> +/*
> + * check whether cluster blocks are contiguous, and add extent cache entry
> + * only if cluster blocks are logically and physically contiguous.
> + */
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
> +{
> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
> +	int i = compressed ? 1 : 0;
> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +		if (!__is_valid_data_blkaddr(blkaddr))
> +			break;
> +		if (first_blkaddr + i - 1 != blkaddr)
> +			return 0;
> +	}
> +
> +	return compressed ? i - 1 : i;
> +}
> +
> +
>  const struct address_space_operations f2fs_compress_aops = {
>  	.releasepage = f2fs_release_page,
>  	.invalidatepage = f2fs_invalidate_page,
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index d2cf48c5a2e4..9572d78da4d7 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	sector_t last_block_in_file;
>  	const unsigned blocksize = blks_to_bytes(inode, 1);
>  	struct decompress_io_ctx *dic = NULL;
> +	struct extent_info_unaligned eiu;
> +	bool extent_cache = false;
>  	int i;
>  	int ret = 0;
>  
> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	if (f2fs_cluster_is_empty(cc))
>  		goto out;
>  
> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> -	if (ret)
> -		goto out;
> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
> +		extent_cache = true;

Is there any race condition between the address in extent_cache and the one in
dnode? I feel that we could synchronize it by locking its dnode block.

>  
> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	if (!extent_cache) {
> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> +		if (ret)
> +			goto out;
> +
> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	}
>  
>  	for (i = 1; i < cc->cluster_size; i++) {
>  		block_t blkaddr;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> -						dn.ofs_in_node + i);
> +		if (extent_cache)
> +			blkaddr = eiu.ei.blk + i;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +							dn.ofs_in_node + i);
>  
>  		if (!__is_valid_data_blkaddr(blkaddr))
>  			break;
> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  			goto out_put_dnode;
>  		}
>  		cc->nr_cpages++;
> +
> +		if (extent_cache && i >= eiu.plen)
> +			break;
>  	}
>  
>  	/* nothing to decompress */
> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		block_t blkaddr;
>  		struct bio_post_read_ctx *ctx;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +		if (extent_cache)
> +			blkaddr = eiu.plen + i + 1;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>  						dn.ofs_in_node + i + 1);
>  
>  		f2fs_wait_on_block_writeback(inode, blkaddr);
> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		*last_block_in_bio = blkaddr;
>  	}
>  
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  
>  	*bio_ret = bio;
>  	return 0;
>  
>  out_put_dnode:
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  out:
>  	for (i = 0; i < cc->cluster_size; i++) {
>  		if (cc->rpages[i]) {
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index 3ebf976a682d..db9de95f90dc 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node *parent, struct rb_node **p,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct extent_node *en;
>  
> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  	INIT_LIST_HEAD(&en->list);
>  	en->et = et;
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (unaligned)
> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
> +#endif
> +
>  	rb_link_node(&en->rb_node, parent, p);
>  	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>  	atomic_inc(&et->node_cnt);
> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>  	struct rb_node **p = &et->root.rb_root.rb_node;
>  	struct extent_node *en;
>  
> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>  	if (!en)
>  		return NULL;
>  
> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		stat_inc_rbtree_node_hit(sbi);
>  
>  	*ei = en->ei;
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
> +				!f2fs_sb_has_readonly(sbi)) {
> +		struct extent_info_unaligned *eiu =
> +				(struct extent_info_unaligned *)ei;
> +
> +		eiu->plen = en->plen;
> +	}
> +#endif
> +
>  	spin_lock(&sbi->extent_lock);
>  	if (!list_empty(&en->list)) {
>  		list_move_tail(&en->list, &sbi->extent_list);
> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct extent_node *prev_ex,
> -				struct extent_node *next_ex)
> +				struct extent_node *next_ex,
> +				bool unaligned)
>  {
>  	struct extent_node *en = NULL;
>  
> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>  		prev_ex->ei.len += ei->len;
>  		ei = &prev_ex->ei;
>  		en = prev_ex;
>  	}
>  
> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>  		next_ex->ei.fofs = ei->fofs;
>  		next_ex->ei.blk = ei->blk;
>  		next_ex->ei.len += ei->len;
> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node **insert_p,
>  				struct rb_node *insert_parent,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct rb_node **p;
>  	struct rb_node *parent = NULL;
> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>  						ei->fofs, &leftmost);
>  do_insert:
> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>  	if (!en)
>  		return NULL;
>  
> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  						end - dei.fofs + dei.blk,
>  						org_end - end);
>  				en1 = __insert_extent_tree(sbi, et, &ei,
> -							NULL, NULL, true);
> +						NULL, NULL, true, false);
>  				next_en = en1;
>  			} else {
>  				en->ei.fofs = end;
> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  	if (blkaddr) {
>  
>  		set_extent_info(&ei, fofs, blkaddr, len);
> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
> +		if (!__try_merge_extent_node(sbi, et, &ei,
> +					prev_en, next_en, false))
>  			__insert_extent_tree(sbi, et, &ei,
> -					insert_p, insert_parent, leftmost);
> +				insert_p, insert_parent, leftmost, false);
>  
>  		/* give up extent_cache, if split and small updates happen */
>  		if (dei.len >= 1 &&
> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  		f2fs_mark_inode_dirty_sync(inode, true);
>  }
>  
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
> +	struct extent_node *en = NULL;
> +	struct extent_node *prev_en = NULL, *next_en = NULL;
> +	struct extent_info_unaligned eiu;
> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
> +	bool leftmost = false;
> +
> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
> +
> +	write_lock(&et->lock);
> +
> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
> +		write_unlock(&et->lock);
> +		return;
> +	}
> +
> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
> +				(struct rb_entry *)et->cached_en, fofs,
> +				(struct rb_entry **)&prev_en,
> +				(struct rb_entry **)&next_en,
> +				&insert_p, &insert_parent, false,
> +				&leftmost);
> +	f2fs_bug_on(sbi, en);
> +
> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
> +	eiu.plen = plen;
> +
> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
> +				prev_en, next_en, true))
> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
> +				insert_p, insert_parent, leftmost, true);
> +
> +	write_unlock(&et->lock);
> +}
> +
> +
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>  {
>  	struct extent_tree *et, *next;
> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>  }
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu)
> +{
> +	if (!f2fs_may_extent_tree(inode))
> +		return false;
> +
> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
> +}
> +#endif
> +
>  void f2fs_update_extent_cache(struct dnode_of_data *dn)
>  {
>  	pgoff_t fofs;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 0fe239dd50f4..3a02642a26d4 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -578,11 +578,21 @@ struct extent_info {
>  	u32 blk;			/* start block address of the extent */
>  };
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +struct extent_info_unaligned {
> +	struct extent_info ei;		/* extent info */
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +};
> +#endif
> +
>  struct extent_node {
>  	struct rb_node rb_node;		/* rb node located in rb-tree */
>  	struct extent_info ei;		/* extent info */
>  	struct list_head list;		/* node in global extent list of sbi */
>  	struct extent_tree *et;		/* extent tree pointer */
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +#endif
>  };
>  
>  struct extent_tree {
> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>  }
>  
>  static inline bool __is_extent_mergeable(struct extent_info *back,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
> +
> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
> +		return false;
> +#endif
>  	return (back->fofs + back->len == front->fofs &&
>  			back->blk + back->len == front->blk);
>  }
>  
>  static inline bool __is_back_mergeable(struct extent_info *cur,
> -						struct extent_info *back)
> +				struct extent_info *back, bool unaligned)
>  {
> -	return __is_extent_mergeable(back, cur);
> +	return __is_extent_mergeable(back, cur, unaligned);
>  }
>  
>  static inline bool __is_front_mergeable(struct extent_info *cur,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> -	return __is_extent_mergeable(cur, front);
> +	return __is_extent_mergeable(cur, front, unaligned);
>  }
>  
>  extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>  		bool force, bool *leftmost);
>  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>  				struct rb_root_cached *root, bool check_key);
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen);
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>  void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>  void f2fs_drop_extent_tree(struct inode *inode);
> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>  void f2fs_destroy_extent_tree(struct inode *inode);
>  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  			struct extent_info *ei);
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu);
>  void f2fs_update_extent_cache(struct dnode_of_data *dn);
>  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>  			pgoff_t fofs, block_t blkaddr, unsigned int len);
> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>  void f2fs_put_page_dic(struct page *page);
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>  int f2fs_init_compress_ctx(struct compress_ctx *cc);
>  void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index dd611efa8aa4..7be2b01caa2a 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>  	dn->ofs_in_node = offset[level];
>  	dn->node_page = npage[level];
>  	dn->data_blkaddr = f2fs_data_blkaddr(dn);
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +			!f2fs_sb_has_readonly(sbi)) {
> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
> +
> +		if (blknum) {
> +			block_t blkaddr = f2fs_data_blkaddr(dn);
> +
> +			if (blkaddr == COMPRESS_ADDR)
> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +							dn->ofs_in_node + 1);
> +
> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
> +					index, blkaddr,
> +					F2FS_I(dn->inode)->i_cluster_size,
> +					blknum);
> +		}
> +	}
> +#endif
>  	return 0;
>  
>  release_pages:
> -- 
> 2.22.1

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-07-30 19:20   ` Jaegeuk Kim
  0 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-07-30 19:20 UTC (permalink / raw)
  To: Chao Yu; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 07/07, Chao Yu wrote:
> A compressed inode may suffer a read performance issue because it can
> not use the extent cache, so I propose to add this unaligned extent support
> to improve it.
> 
> Currently, it only works in readonly format f2fs image.
> 
> Unaligned extent: in one compressed cluster, physical block number
> will be less than logical block number, so we add an extra physical
> block length in extent info in order to indicate such extent status.
> 
> The idea is if one whole cluster blocks are contiguous physically,
> once its mapping info was read for the first time, we will cache an
> unaligned (or aligned) extent info entry in the extent cache; it is expected
> that the mapping info will be hit when rereading the cluster.
> 
> Merge policy:
> - Aligned extents can be merged.
> - Aligned extent and unaligned extent can not be merged.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> 
> I just post this for comments, it passes compiling, w/o any test.
> 
>  fs/f2fs/compress.c     | 25 ++++++++++++
>  fs/f2fs/data.c         | 38 +++++++++++++-----
>  fs/f2fs/extent_cache.c | 90 +++++++++++++++++++++++++++++++++++++-----
>  fs/f2fs/f2fs.h         | 33 +++++++++++++---
>  fs/f2fs/node.c         | 20 ++++++++++
>  5 files changed, 181 insertions(+), 25 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..f072ac33eba5 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1666,6 +1666,31 @@ void f2fs_put_page_dic(struct page *page)
>  	f2fs_put_dic(dic);
>  }
>  
> +/*
> + * check whether cluster blocks are contiguous, and add extent cache entry
> + * only if cluster blocks are logically and physically contiguous.
> + */
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
> +{
> +	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
> +	int i = compressed ? 1 : 0;
> +	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
> +		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +						dn->ofs_in_node + i);
> +
> +		if (!__is_valid_data_blkaddr(blkaddr))
> +			break;
> +		if (first_blkaddr + i - 1 != blkaddr)
> +			return 0;
> +	}
> +
> +	return compressed ? i - 1 : i;
> +}
> +
> +
>  const struct address_space_operations f2fs_compress_aops = {
>  	.releasepage = f2fs_release_page,
>  	.invalidatepage = f2fs_invalidate_page,
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index d2cf48c5a2e4..9572d78da4d7 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2115,6 +2115,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	sector_t last_block_in_file;
>  	const unsigned blocksize = blks_to_bytes(inode, 1);
>  	struct decompress_io_ctx *dic = NULL;
> +	struct extent_info_unaligned eiu;
> +	bool extent_cache = false;
>  	int i;
>  	int ret = 0;
>  
> @@ -2145,18 +2147,26 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	if (f2fs_cluster_is_empty(cc))
>  		goto out;
>  
> -	set_new_dnode(&dn, inode, NULL, NULL, 0);
> -	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> -	if (ret)
> -		goto out;
> +	if (f2fs_lookup_extent_cache_unaligned(inode, start_idx, &eiu))
> +		extent_cache = true;

Is there any race condition between the address in extent_cache and the one in
dnode? I feel that we could synchronize it by locking its dnode block.

>  
> -	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	if (!extent_cache) {
> +		set_new_dnode(&dn, inode, NULL, NULL, 0);
> +		ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
> +		if (ret)
> +			goto out;
> +
> +		f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
> +	}
>  
>  	for (i = 1; i < cc->cluster_size; i++) {
>  		block_t blkaddr;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> -						dn.ofs_in_node + i);
> +		if (extent_cache)
> +			blkaddr = eiu.ei.blk + i;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +							dn.ofs_in_node + i);
>  
>  		if (!__is_valid_data_blkaddr(blkaddr))
>  			break;
> @@ -2166,6 +2176,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  			goto out_put_dnode;
>  		}
>  		cc->nr_cpages++;
> +
> +		if (extent_cache && i >= eiu.plen)
> +			break;
>  	}
>  
>  	/* nothing to decompress */
> @@ -2185,7 +2198,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		block_t blkaddr;
>  		struct bio_post_read_ctx *ctx;
>  
> -		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> +		if (extent_cache)
> +			blkaddr = eiu.plen + i + 1;
> +		else
> +			blkaddr = data_blkaddr(dn.inode, dn.node_page,
>  						dn.ofs_in_node + i + 1);
>  
>  		f2fs_wait_on_block_writeback(inode, blkaddr);
> @@ -2231,13 +2247,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		*last_block_in_bio = blkaddr;
>  	}
>  
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  
>  	*bio_ret = bio;
>  	return 0;
>  
>  out_put_dnode:
> -	f2fs_put_dnode(&dn);
> +	if (!extent_cache)
> +		f2fs_put_dnode(&dn);
>  out:
>  	for (i = 0; i < cc->cluster_size; i++) {
>  		if (cc->rpages[i]) {
> diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
> index 3ebf976a682d..db9de95f90dc 100644
> --- a/fs/f2fs/extent_cache.c
> +++ b/fs/f2fs/extent_cache.c
> @@ -235,7 +235,7 @@ static struct kmem_cache *extent_node_slab;
>  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node *parent, struct rb_node **p,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct extent_node *en;
>  
> @@ -247,6 +247,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
>  	INIT_LIST_HEAD(&en->list);
>  	en->et = et;
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (unaligned)
> +		en->plen = ((struct extent_info_unaligned *)ei)->plen;
> +#endif
> +
>  	rb_link_node(&en->rb_node, parent, p);
>  	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
>  	atomic_inc(&et->node_cnt);
> @@ -320,7 +325,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
>  	struct rb_node **p = &et->root.rb_root.rb_node;
>  	struct extent_node *en;
>  
> -	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
> +	en = __attach_extent_node(sbi, et, ei, NULL, p, true, false);
>  	if (!en)
>  		return NULL;
>  
> @@ -439,6 +444,17 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  		stat_inc_rbtree_node_hit(sbi);
>  
>  	*ei = en->ei;
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
> +				!f2fs_sb_has_readonly(sbi)) {
> +		struct extent_info_unaligned *eiu =
> +				(struct extent_info_unaligned *)ei;
> +
> +		eiu->plen = en->plen;
> +	}
> +#endif
> +
>  	spin_lock(&sbi->extent_lock);
>  	if (!list_empty(&en->list)) {
>  		list_move_tail(&en->list, &sbi->extent_list);
> @@ -457,17 +473,18 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
>  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct extent_node *prev_ex,
> -				struct extent_node *next_ex)
> +				struct extent_node *next_ex,
> +				bool unaligned)
>  {
>  	struct extent_node *en = NULL;
>  
> -	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
> +	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, unaligned)) {
>  		prev_ex->ei.len += ei->len;
>  		ei = &prev_ex->ei;
>  		en = prev_ex;
>  	}
>  
> -	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
> +	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, unaligned)) {
>  		next_ex->ei.fofs = ei->fofs;
>  		next_ex->ei.blk = ei->blk;
>  		next_ex->ei.len += ei->len;
> @@ -495,7 +512,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  				struct extent_tree *et, struct extent_info *ei,
>  				struct rb_node **insert_p,
>  				struct rb_node *insert_parent,
> -				bool leftmost)
> +				bool leftmost, bool unaligned)
>  {
>  	struct rb_node **p;
>  	struct rb_node *parent = NULL;
> @@ -512,7 +529,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
>  	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
>  						ei->fofs, &leftmost);
>  do_insert:
> -	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
> +	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost, unaligned);
>  	if (!en)
>  		return NULL;
>  
> @@ -594,7 +611,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  						end - dei.fofs + dei.blk,
>  						org_end - end);
>  				en1 = __insert_extent_tree(sbi, et, &ei,
> -							NULL, NULL, true);
> +						NULL, NULL, true, false);
>  				next_en = en1;
>  			} else {
>  				en->ei.fofs = end;
> @@ -633,9 +650,10 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  	if (blkaddr) {
>  
>  		set_extent_info(&ei, fofs, blkaddr, len);
> -		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
> +		if (!__try_merge_extent_node(sbi, et, &ei,
> +					prev_en, next_en, false))
>  			__insert_extent_tree(sbi, et, &ei,
> -					insert_p, insert_parent, leftmost);
> +				insert_p, insert_parent, leftmost, false);
>  
>  		/* give up extent_cache, if split and small updates happen */
>  		if (dei.len >= 1 &&
> @@ -661,6 +679,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
>  		f2fs_mark_inode_dirty_sync(inode, true);
>  }
>  
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +	struct extent_tree *et = F2FS_I(inode)->extent_tree;
> +	struct extent_node *en = NULL;
> +	struct extent_node *prev_en = NULL, *next_en = NULL;
> +	struct extent_info_unaligned eiu;
> +	struct rb_node **insert_p = NULL, *insert_parent = NULL;
> +	bool leftmost = false;
> +
> +	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
> +
> +	write_lock(&et->lock);
> +
> +	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
> +		write_unlock(&et->lock);
> +		return;
> +	}
> +
> +	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
> +				(struct rb_entry *)et->cached_en, fofs,
> +				(struct rb_entry **)&prev_en,
> +				(struct rb_entry **)&next_en,
> +				&insert_p, &insert_parent, false,
> +				&leftmost);
> +	f2fs_bug_on(sbi, en);
> +
> +	set_extent_info(&eiu.ei, fofs, blkaddr, llen);
> +	eiu.plen = plen;
> +
> +	if (!__try_merge_extent_node(sbi, et, (struct extent_info *)&eiu,
> +				prev_en, next_en, true))
> +		__insert_extent_tree(sbi, et, (struct extent_info *)&eiu,
> +				insert_p, insert_parent, leftmost, true);
> +
> +	write_unlock(&et->lock);
> +}
> +
> +
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
>  {
>  	struct extent_tree *et, *next;
> @@ -818,6 +877,17 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
>  }
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu)
> +{
> +	if (!f2fs_may_extent_tree(inode))
> +		return false;
> +
> +	return f2fs_lookup_extent_tree(inode, pgofs, (struct extent_info *)eiu);
> +}
> +#endif
> +
>  void f2fs_update_extent_cache(struct dnode_of_data *dn)
>  {
>  	pgoff_t fofs;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 0fe239dd50f4..3a02642a26d4 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -578,11 +578,21 @@ struct extent_info {
>  	u32 blk;			/* start block address of the extent */
>  };
>  
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +struct extent_info_unaligned {
> +	struct extent_info ei;		/* extent info */
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +};
> +#endif
> +
>  struct extent_node {
>  	struct rb_node rb_node;		/* rb node located in rb-tree */
>  	struct extent_info ei;		/* extent info */
>  	struct list_head list;		/* node in global extent list of sbi */
>  	struct extent_tree *et;		/* extent tree pointer */
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	unsigned int plen;		/* physical extent length of compressed blocks */
> +#endif
>  };
>  
>  struct extent_tree {
> @@ -817,22 +827,29 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
>  }
>  
>  static inline bool __is_extent_mergeable(struct extent_info *back,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	struct extent_info_unaligned *be = (struct extent_info_unaligned *)back;
> +	struct extent_info_unaligned *fe = (struct extent_info_unaligned *)front;
> +
> +	if (!unaligned || be->ei.len != be->plen || fe->ei.len != fe->plen)
> +		return false;
> +#endif
>  	return (back->fofs + back->len == front->fofs &&
>  			back->blk + back->len == front->blk);
>  }
>  
>  static inline bool __is_back_mergeable(struct extent_info *cur,
> -						struct extent_info *back)
> +				struct extent_info *back, bool unaligned)
>  {
> -	return __is_extent_mergeable(back, cur);
> +	return __is_extent_mergeable(back, cur, unaligned);
>  }
>  
>  static inline bool __is_front_mergeable(struct extent_info *cur,
> -						struct extent_info *front)
> +				struct extent_info *front, bool unaligned)
>  {
> -	return __is_extent_mergeable(cur, front);
> +	return __is_extent_mergeable(cur, front, unaligned);
>  }
>  
>  extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
> @@ -3972,6 +3989,9 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
>  		bool force, bool *leftmost);
>  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
>  				struct rb_root_cached *root, bool check_key);
> +void f2fs_update_extent_tree_range_unaligned(struct inode *inode,
> +				pgoff_t fofs, block_t blkaddr, unsigned int llen,
> +				unsigned int plen);
>  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
>  void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
>  void f2fs_drop_extent_tree(struct inode *inode);
> @@ -3979,6 +3999,8 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode);
>  void f2fs_destroy_extent_tree(struct inode *inode);
>  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
>  			struct extent_info *ei);
> +bool f2fs_lookup_extent_cache_unaligned(struct inode *inode, pgoff_t pgofs,
> +					struct extent_info_unaligned *eiu);
>  void f2fs_update_extent_cache(struct dnode_of_data *dn);
>  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
>  			pgoff_t fofs, block_t blkaddr, unsigned int len);
> @@ -4055,6 +4077,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
>  void f2fs_put_page_dic(struct page *page);
> +int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
>  int f2fs_init_compress_ctx(struct compress_ctx *cc);
>  void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
>  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index dd611efa8aa4..7be2b01caa2a 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -832,6 +832,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
>  	dn->ofs_in_node = offset[level];
>  	dn->node_page = npage[level];
>  	dn->data_blkaddr = f2fs_data_blkaddr(dn);
> +
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +			!f2fs_sb_has_readonly(sbi)) {
> +		int blknum = f2fs_cluster_blocks_are_contiguous(dn);
> +
> +		if (blknum) {
> +			block_t blkaddr = f2fs_data_blkaddr(dn);
> +
> +			if (blkaddr == COMPRESS_ADDR)
> +				blkaddr = data_blkaddr(dn->inode, dn->node_page,
> +							dn->ofs_in_node + 1);
> +
> +			f2fs_update_extent_tree_range_unaligned(dn->inode,
> +					index, blkaddr,
> +					F2FS_I(dn->inode)->i_cluster_size,
> +					blknum);
> +		}
> +	}
> +#endif
>  	return 0;
>  
>  release_pages:
> -- 
> 2.22.1


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-07-30 19:20   ` [f2fs-dev] " Jaegeuk Kim
@ 2021-08-01  7:03     ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-01  7:03 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/7/31 3:20, Jaegeuk Kim wrote:
> On 07/07, Chao Yu wrote:
>> Currently, it only works in readonly format f2fs image.

There wouldn't be any race condition because unaligned extent only works
for ro feature now?

> Is there any race condition between the address in extent_cache and the one in
> dnode? I feel that we could synchronize it by locking its dnode block.

Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-01  7:03     ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-01  7:03 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/7/31 3:20, Jaegeuk Kim wrote:
> On 07/07, Chao Yu wrote:
>> Currently, it only works in readonly format f2fs image.

There wouldn't be any race condition because unaligned extent only works
for ro feature now?

> Is there any race condition between the address in extent_cache and the one in
> dnode? I feel that we could synchronize it by locking its dnode block.

Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-08-01  7:03     ` [f2fs-dev] " Chao Yu
@ 2021-08-02 18:03       ` Jaegeuk Kim
  -1 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-08-02 18:03 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 08/01, Chao Yu wrote:
> On 2021/7/31 3:20, Jaegeuk Kim wrote:
> > On 07/07, Chao Yu wrote:
> > > Currently, it only works in readonly format f2fs image.
> 
> There wouldn't be any race condition because unaligned extent only works
> for ro feature now?

Isn't your patch proposing on writable partition?

> 
> > Is there any race condition between the address in extent_cache and the one in
> > dnode? I feel that we could synchronize it by locking its dnode block.
> 
> Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-02 18:03       ` Jaegeuk Kim
  0 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-08-02 18:03 UTC (permalink / raw)
  To: Chao Yu; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 08/01, Chao Yu wrote:
> On 2021/7/31 3:20, Jaegeuk Kim wrote:
> > On 07/07, Chao Yu wrote:
> > > Currently, it only works in readonly format f2fs image.
> 
> There wouldn't be any race condition because unaligned extent only works
> for ro feature now?

Isn't your patch proposing on writable partition?

> 
> > Is there any race condition between the address in extent_cache and the one in
> > dnode? I feel that we could synchronize it by locking its dnode block.
> 
> Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-08-02 18:03       ` [f2fs-dev] " Jaegeuk Kim
@ 2021-08-03  1:07         ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:07 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/8/3 2:03, Jaegeuk Kim wrote:
> On 08/01, Chao Yu wrote:
>> On 2021/7/31 3:20, Jaegeuk Kim wrote:
>>> On 07/07, Chao Yu wrote:
>>>> Currently, it only works in readonly format f2fs image.
>>
>> There wouldn't be any race condition because unaligned extent only works
>> for ro feature now?
> 
> Isn't your patch proposing on writable partition?

Please check description in patch message, now it was designed only for
compression case w/ ro feature, let's check and support rw partition later.

Thanks,

> 
>>
>>> Is there any race condition between the address in extent_cache and the one in
>>> dnode? I feel that we could synchronize it by locking its dnode block.
>>
>> Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-03  1:07         ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:07 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/8/3 2:03, Jaegeuk Kim wrote:
> On 08/01, Chao Yu wrote:
>> On 2021/7/31 3:20, Jaegeuk Kim wrote:
>>> On 07/07, Chao Yu wrote:
>>>> Currently, it only works in readonly format f2fs image.
>>
>> There wouldn't be any race condition because unaligned extent only works
>> for ro feature now?
> 
> Isn't your patch proposing on writable partition?

Please check description in patch message, now it was designed only for
compression case w/ ro feature, let's check and support rw partition later.

Thanks,

> 
>>
>>> Is there any race condition between the address in extent_cache and the one in
>>> dnode? I feel that we could synchronize it by locking its dnode block.
>>
>> Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-08-03  1:07         ` [f2fs-dev] " Chao Yu
@ 2021-08-03  1:25           ` Jaegeuk Kim
  -1 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-08-03  1:25 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 08/03, Chao Yu wrote:
> On 2021/8/3 2:03, Jaegeuk Kim wrote:
> > On 08/01, Chao Yu wrote:
> > > On 2021/7/31 3:20, Jaegeuk Kim wrote:
> > > > On 07/07, Chao Yu wrote:
> > > > > Currently, it only works in readonly format f2fs image.
> > > 
> > > There wouldn't be any race condition because unaligned extent only works
> > > for ro feature now?
> > 
> > Isn't your patch proposing on writable partition?
> 
> Please check description in patch message, now it was designed only for
                                                 --
                                                 what do you refer "it"?

> compression case w/ ro feature, let's check and support rw partition later.

Quite confused the patch description and code changes as well. You added some
change with this as well which is for RW.

+       if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+                       !f2fs_sb_has_readonly(sbi)) {

> 
> Thanks,
> 
> > 
> > > 
> > > > Is there any race condition between the address in extent_cache and the one in
> > > > dnode? I feel that we could synchronize it by locking its dnode block.
> > > 
> > > Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-03  1:25           ` Jaegeuk Kim
  0 siblings, 0 replies; 25+ messages in thread
From: Jaegeuk Kim @ 2021-08-03  1:25 UTC (permalink / raw)
  To: Chao Yu; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 08/03, Chao Yu wrote:
> On 2021/8/3 2:03, Jaegeuk Kim wrote:
> > On 08/01, Chao Yu wrote:
> > > On 2021/7/31 3:20, Jaegeuk Kim wrote:
> > > > On 07/07, Chao Yu wrote:
> > > > > Currently, it only works in readonly format f2fs image.
> > > 
> > > There wouldn't be any race condition because unaligned extent only works
> > > for ro feature now?
> > 
> > Isn't your patch proposing on writable partition?
> 
> Please check description in patch message, now it was designed only for
                                                 --
                                                 what does "it" refer to?

> compression case w/ ro feature, let's check and support rw partition later.

I'm quite confused by the patch description versus the code changes as well. You
also added this change, which is for RW:

+       if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+                       !f2fs_sb_has_readonly(sbi)) {

> 
> Thanks,
> 
> > 
> > > 
> > > > Is there any race condition between the address in extent_cache and the one in
> > > > dnode? I feel that we could synchronize it by locking its dnode block.
> > > 
> > > Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-08-03  1:25           ` [f2fs-dev] " Jaegeuk Kim
@ 2021-08-03  1:29             ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:29 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/8/3 9:25, Jaegeuk Kim wrote:
> On 08/03, Chao Yu wrote:
>> On 2021/8/3 2:03, Jaegeuk Kim wrote:
>>> On 08/01, Chao Yu wrote:
>>>> On 2021/7/31 3:20, Jaegeuk Kim wrote:
>>>>> On 07/07, Chao Yu wrote:
>>>>>> Currently, it only works in readonly format f2fs image.
>>>>
>>>> There wouldn't be any race condition because unaligned extent only works
>>>> for ro feature now?
>>>
>>> Isn't your patch proposing on writable partition?
>>
>> Please check description in patch message, now it was designed only for
>                                                   --
>                                                   what do you refer "it"?
> 
>> compression case w/ ro feature, let's check and support rw partition later.
> 
> Quite confused the patch description and code changes as well. You added some
> change with this as well which is for RW.
> 
> +       if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +                       !f2fs_sb_has_readonly(sbi)) {

My bad, I've updated in my dev branch, but forgot to resend it...:

https://git.kernel.org/pub/scm/linux/kernel/git/chao/linux.git/commit/?h=dev&id=c3a40f6a186ba064f95432b308173d0a8fe375dc

Thanks,

> 
>>
>> Thanks,
>>
>>>
>>>>
>>>>> Is there any race condition between the address in extent_cache and the one in
>>>>> dnode? I feel that we could synchronize it by locking its dnode block.
>>>>
>>>> Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-03  1:29             ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:29 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/8/3 9:25, Jaegeuk Kim wrote:
> On 08/03, Chao Yu wrote:
>> On 2021/8/3 2:03, Jaegeuk Kim wrote:
>>> On 08/01, Chao Yu wrote:
>>>> On 2021/7/31 3:20, Jaegeuk Kim wrote:
>>>>> On 07/07, Chao Yu wrote:
>>>>>> Currently, it only works in readonly format f2fs image.
>>>>
>>>> There wouldn't be any race condition because unaligned extent only works
>>>> for ro feature now?
>>>
>>> Isn't your patch proposing on writable partition?
>>
>> Please check description in patch message, now it was designed only for
>                                                   --
>                                                   what do you refer "it"?
> 
>> compression case w/ ro feature, let's check and support rw partition later.
> 
> Quite confused the patch description and code changes as well. You added some
> change with this as well which is for RW.
> 
> +       if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
> +                       !f2fs_sb_has_readonly(sbi)) {

My bad, I've updated in my dev branch, but forgot to resend it...:

https://git.kernel.org/pub/scm/linux/kernel/git/chao/linux.git/commit/?h=dev&id=c3a40f6a186ba064f95432b308173d0a8fe375dc

Thanks,

> 
>>
>> Thanks,
>>
>>>
>>>>
>>>>> Is there any race condition between the address in extent_cache and the one in
>>>>> dnode? I feel that we could synchronize it by locking its dnode block.
>>>>
>>>> Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [RFC NO MERGE] f2fs: extent cache: support unaligned extent
  2021-08-03  1:25           ` [f2fs-dev] " Jaegeuk Kim
@ 2021-08-03  1:31             ` Chao Yu
  -1 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:31 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 2021/8/3 9:25, Jaegeuk Kim wrote:
>> Please check description in patch message, now it was designed only for
>                                                   --
>                                                   what do you refer "it"?

Oh...I mean this feature I proposed: unaligned extent support.

Thanks,

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [f2fs-dev] [RFC NO MERGE] f2fs: extent cache: support unaligned extent
@ 2021-08-03  1:31             ` Chao Yu
  0 siblings, 0 replies; 25+ messages in thread
From: Chao Yu @ 2021-08-03  1:31 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: Chao Yu, linux-kernel, linux-f2fs-devel

On 2021/8/3 9:25, Jaegeuk Kim wrote:
>> Please check description in patch message, now it was designed only for
>                                                   --
>                                                   what do you refer "it"?

Oh...I mean this feature I proposed: unaligned extent support.

Thanks,


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2021-08-03  1:31 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-07  1:58 [RFC NO MERGE] f2fs: extent cache: support unaligned extent Chao Yu
2021-07-07  1:58 ` [f2fs-dev] " Chao Yu
2021-07-07  3:46 ` kernel test robot
2021-07-14  2:27 ` Chao Yu
2021-07-14  2:27   ` [f2fs-dev] " Chao Yu
2021-07-19 18:36 ` Jaegeuk Kim
2021-07-19 18:36   ` [f2fs-dev] " Jaegeuk Kim
2021-07-20  0:39   ` Chao Yu
2021-07-20  0:39     ` [f2fs-dev] " Chao Yu
2021-07-29  1:41     ` Chao Yu
2021-07-29  1:41       ` Chao Yu
2021-07-30 19:20 ` Jaegeuk Kim
2021-07-30 19:20   ` [f2fs-dev] " Jaegeuk Kim
2021-08-01  7:03   ` Chao Yu
2021-08-01  7:03     ` [f2fs-dev] " Chao Yu
2021-08-02 18:03     ` Jaegeuk Kim
2021-08-02 18:03       ` [f2fs-dev] " Jaegeuk Kim
2021-08-03  1:07       ` Chao Yu
2021-08-03  1:07         ` [f2fs-dev] " Chao Yu
2021-08-03  1:25         ` Jaegeuk Kim
2021-08-03  1:25           ` [f2fs-dev] " Jaegeuk Kim
2021-08-03  1:29           ` Chao Yu
2021-08-03  1:29             ` [f2fs-dev] " Chao Yu
2021-08-03  1:31           ` Chao Yu
2021-08-03  1:31             ` [f2fs-dev] " Chao Yu

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.