linux-kernel.vger.kernel.org archive mirror
* [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks
@ 2020-12-09  8:43 Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 2/5] f2fs: compress: support compress level Chao Yu
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Chao Yu @ 2020-12-09  8:43 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Support using the address space of an internal inode to cache compressed
blocks, in order to improve the cache hit ratio of random reads.
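
A minimal sketch of the idea on the read path (not part of the diff below;
try_compress_cache() is a made-up name, the patch's actual helper is
f2fs_load_compressed_page(), and error handling is omitted): the compress
inode's address space is indexed by on-disk block address, so a random read
of a compressed cluster can be served from memory when that block has been
read before.

	static bool try_compress_cache(struct f2fs_sb_info *sbi,
					struct page *dst, block_t blkaddr)
	{
		struct page *cpage;
		bool hit = false;

		cpage = find_lock_page(COMPRESS_MAPPING(sbi), blkaddr);
		if (!cpage)
			return false;	/* miss: caller reads the block from disk */

		if (PageUptodate(cpage)) {
			/* hit: copy the cached on-disk compressed block */
			memcpy(page_address(dst), page_address(cpage), PAGE_SIZE);
			SetPageUptodate(dst);
			hit = true;
		}
		f2fs_put_page(cpage, 1);	/* unlock and drop the reference */
		return hit;
	}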

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 Documentation/filesystems/f2fs.rst |   3 +
 fs/f2fs/compress.c                 | 198 +++++++++++++++++++++++++++--
 fs/f2fs/data.c                     |  29 ++++-
 fs/f2fs/debug.c                    |  13 ++
 fs/f2fs/f2fs.h                     |  34 ++++-
 fs/f2fs/gc.c                       |   1 +
 fs/f2fs/inode.c                    |  21 ++-
 fs/f2fs/segment.c                  |   6 +-
 fs/f2fs/super.c                    |  19 ++-
 include/linux/f2fs_fs.h            |   1 +
 10 files changed, 305 insertions(+), 20 deletions(-)

diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index dae15c96e659..5fa45fd8e4af 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -268,6 +268,9 @@ compress_mode=%s	 Control file compression mode. This supports "fs" and "user"
 			 choosing the target file and the timing. The user can do manual
 			 compression/decompression on the compression enabled files using
 			 ioctls.
+compress_cache		 Support to use address space of a filesystem managed inode to
+			 cache compressed block, in order to improve cache hit ratio of
+			 random read.
 inlinecrypt		 When possible, encrypt/decrypt the contents of encrypted
 			 files using the blk-crypto framework rather than
 			 filesystem-layer encryption. This allows the use of
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 4bcbacfe3325..446dd41a7bad 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -12,9 +12,11 @@
 #include <linux/lzo.h>
 #include <linux/lz4.h>
 #include <linux/zstd.h>
+#include <linux/pagevec.h>
 
 #include "f2fs.h"
 #include "node.h"
+#include "segment.h"
 #include <trace/events/f2fs.h>
 
 static struct kmem_cache *cic_entry_slab;
@@ -721,25 +723,14 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	return ret;
 }
 
-void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
+void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity)
 {
-	struct decompress_io_ctx *dic =
-			(struct decompress_io_ctx *)page_private(page);
-	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 	struct f2fs_inode_info *fi= F2FS_I(dic->inode);
 	const struct f2fs_compress_ops *cops =
 			f2fs_cops[fi->i_compress_algorithm];
 	int ret;
 	int i;
 
-	dec_page_count(sbi, F2FS_RD_DATA);
-
-	if (bio->bi_status || PageError(page))
-		dic->failed = true;
-
-	if (atomic_dec_return(&dic->pending_pages))
-		return;
-
 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
 				dic->cluster_size, fi->i_compress_algorithm);
 
@@ -797,6 +788,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 	ret = cops->decompress_pages(dic);
 
 	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+		struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
 		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
 
@@ -830,6 +822,30 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 		f2fs_free_dic(dic);
 }
 
+void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+						nid_t ino, block_t blkaddr);
+void f2fs_decompress_pages(struct bio *bio, struct page *page,
+						bool verity, unsigned int ofs)
+{
+	struct decompress_io_ctx *dic =
+			(struct decompress_io_ctx *)page_private(page);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+	block_t blkaddr;
+
+	dec_page_count(sbi, F2FS_RD_DATA);
+
+	if (bio->bi_status || PageError(page))
+		dic->failed = true;
+
+	blkaddr = SECTOR_TO_BLOCK(bio->bi_iter.bi_sector) + ofs;
+	f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr);
+
+	if (atomic_dec_return(&dic->pending_pages))
+		return;
+
+	f2fs_do_decompress_pages(dic, verity);
+}
+
 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
 {
 	if (cc->cluster_idx == NULL_CLUSTER)
@@ -1600,6 +1616,164 @@ void f2fs_decompress_end_io(struct page **rpages,
 	}
 }
 
+const struct address_space_operations f2fs_compress_aops = {
+	.releasepage = f2fs_release_page,
+	.invalidatepage = f2fs_invalidate_page,
+};
+
+struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
+{
+	return sbi->compress_inode->i_mapping;
+}
+
+void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+	if (!sbi->compress_inode)
+		return;
+	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+}
+
+void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+						nid_t ino, block_t blkaddr)
+{
+	struct page *cpage;
+	int ret;
+	struct sysinfo si;
+	unsigned long free_ram, avail_ram;
+
+	if (!test_opt(sbi, COMPRESS_CACHE))
+		return;
+
+	si_meminfo(&si);
+	free_ram = si.freeram;
+	avail_ram = si.totalram - si.totalhigh;
+
+	/* free memory is lower than watermark, deny caching compress page */
+	if (free_ram <= sbi->compress_watermark / 100 * avail_ram)
+		return;
+
+	/* cached page count exceed threshold, deny caching compress page */
+	if (COMPRESS_MAPPING(sbi)->nrpages >=
+			free_ram / 100 * sbi->compress_percent)
+		return;
+
+	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
+	if (cpage) {
+		f2fs_put_page(cpage, 0);
+		return;
+	}
+
+	cpage = alloc_page(__GFP_IO);
+	if (!cpage)
+		return;
+
+	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
+						blkaddr, GFP_NOFS);
+	if (ret) {
+		f2fs_put_page(cpage, 0);
+		return;
+	}
+
+	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
+	SetPageUptodate(cpage);
+
+	f2fs_set_page_private(cpage, ino);
+
+	f2fs_put_page(cpage, 1);
+}
+
+void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+								block_t blkaddr)
+{
+	struct page *cpage;
+
+	if (!test_opt(sbi, COMPRESS_CACHE))
+		return;
+
+	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
+				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
+	if (cpage) {
+		if (PageUptodate(cpage)) {
+			atomic_inc(&sbi->compress_page_hit);
+			memcpy(page_address(page),
+				page_address(cpage), PAGE_SIZE);
+			SetPageUptodate(page);
+		}
+		f2fs_put_page(cpage, 1);
+	}
+}
+
+void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
+{
+	struct address_space *mapping = sbi->compress_inode->i_mapping;
+	struct pagevec pvec;
+	pgoff_t index = 0;
+	pgoff_t end = MAX_BLKADDR(sbi);
+
+	pagevec_init(&pvec);
+
+	do {
+		unsigned int nr_pages;
+		int i;
+
+		nr_pages = pagevec_lookup_range(&pvec, mapping,
+						&index, end - 1);
+		if (!nr_pages)
+			break;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (page->index > end)
+				break;
+
+			lock_page(page);
+			if (page->mapping != mapping) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (ino != page_private(page)) {
+				unlock_page(page);
+				continue;
+			}
+
+			generic_error_remove_page(mapping, page);
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	} while (index < end);
+}
+
+int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
+{
+	struct inode *inode;
+
+	if (!test_opt(sbi, COMPRESS_CACHE))
+		return 0;
+
+	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+	sbi->compress_inode = inode;
+
+	sbi->compress_percent = COMPRESS_PERCENT;
+	sbi->compress_watermark = COMPRESS_WATERMARK;
+
+	atomic_set(&sbi->compress_page_hit, 0);
+
+	return 0;
+}
+
+void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+{
+	if (!sbi->compress_inode)
+		return;
+	iput(sbi->compress_inode);
+	sbi->compress_inode = NULL;
+}
+
 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aa34d620bec9..6787a7a03e86 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -133,17 +133,21 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
 	struct page *page;
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
+	unsigned int ofs = 0;
 
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		page = bv->bv_page;
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 		if (compr && f2fs_is_compressed_page(page)) {
-			f2fs_decompress_pages(bio, page, verity);
+			f2fs_decompress_pages(bio, page, verity, ofs);
+			ofs++;
 			continue;
 		}
-		if (verity)
+		if (verity) {
+			ofs++;
 			continue;
+		}
 #endif
 
 		/* PG_error was set if any post_read step failed */
@@ -156,6 +160,7 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
 		}
 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
 		unlock_page(page);
+		ofs++;
 	}
 }
 
@@ -1421,9 +1426,11 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
 	old_blkaddr = dn->data_blkaddr;
 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
 				&sum, seg_type, NULL);
-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
 		invalidate_mapping_pages(META_MAPPING(sbi),
 					old_blkaddr, old_blkaddr);
+		f2fs_invalidate_compress_page(sbi, old_blkaddr);
+	}
 	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
 
 	/*
@@ -2261,6 +2268,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		blkaddr = data_blkaddr(dn.inode, dn.node_page,
 						dn.ofs_in_node + i + 1);
 
+		f2fs_load_compressed_page(sbi, page, blkaddr);
+		if (PageUptodate(page)) {
+			if (!atomic_dec_return(&dic->pending_pages)) {
+				bool verity =
+					f2fs_need_verity(inode, start_idx);
+
+				f2fs_do_decompress_pages(dic, verity);
+				if (verity) {
+					f2fs_verify_pages(dic->rpages,
+							dic->cluster_size);
+					f2fs_free_dic(dic);
+				}
+			}
+			continue;
+		}
+
 		if (bio && (!page_is_mergeable(sbi, bio,
 					*last_block_in_bio, blkaddr) ||
 		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 197c914119da..f1f8714066c5 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -145,6 +145,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 		si->node_pages = NODE_MAPPING(sbi)->nrpages;
 	if (sbi->meta_inode)
 		si->meta_pages = META_MAPPING(sbi)->nrpages;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (sbi->compress_inode) {
+		si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
+		si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
+	}
+#endif
 	si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
 	si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
 	si->sits = MAIN_SEGS(sbi);
@@ -299,6 +305,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 		unsigned npages = META_MAPPING(sbi)->nrpages;
 		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 	}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (sbi->compress_inode) {
+		unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
+		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	}
+#endif
 }
 
 static int stat_show(struct seq_file *s, void *v)
@@ -461,6 +473,7 @@ static int stat_show(struct seq_file *s, void *v)
 			"volatile IO: %4d (Max. %4d)\n",
 			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
 			   si->vw_cnt, si->max_vw_cnt);
+		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
 		seq_printf(s, "  - nodes: %4d in %4d\n",
 			   si->ndirty_node, si->node_pages);
 		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 7364d453783f..0ff8b18eda05 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
 #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
 #define F2FS_MOUNT_NORECOVERY		0x04000000
 #define F2FS_MOUNT_ATGC			0x08000000
+#define F2FS_MOUNT_COMPRESS_CACHE	0x10000000
 
 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -1298,6 +1299,9 @@ enum compress_flag {
 	COMPRESS_MAX_FLAG,
 };
 
+#define	COMPRESS_WATERMARK			20
+#define	COMPRESS_PERCENT			20
+
 #define COMPRESS_DATA_RESERVED_SIZE		4
 struct compress_data {
 	__le32 clen;			/* compressed data size */
@@ -1571,6 +1575,11 @@ struct f2fs_sb_info {
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	struct kmem_cache *page_array_slab;	/* page array entry */
 	unsigned int page_array_slab_size;	/* default page array slab size */
+
+	struct inode *compress_inode;		/* cache compressed blocks */
+	unsigned int compress_percent;		/* cache page percentage */
+	unsigned int compress_watermark;	/* cache page watermark */
+	atomic_t compress_page_hit;		/* cache hit count */
 #endif
 };
 
@@ -3536,7 +3545,8 @@ struct f2fs_stat_info {
 	unsigned int bimodal, avg_vblocks;
 	int util_free, util_valid, util_invalid;
 	int rsvd_segs, overp_segs;
-	int dirty_count, node_pages, meta_pages;
+	int dirty_count, node_pages, meta_pages, compress_pages;
+	int compress_page_hit;
 	int prefree_count, call_count, cp_count, bg_cp_count;
 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
 	int bg_node_segs, bg_data_segs;
@@ -3874,7 +3884,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
 bool f2fs_is_compress_backend_ready(struct inode *inode);
 int f2fs_init_compress_mempool(void);
 void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
+void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity);
+void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity, unsigned int ofs);
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -3893,10 +3904,19 @@ void f2fs_decompress_end_io(struct page **rpages,
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
+void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
 int __init f2fs_init_compress_cache(void);
 void f2fs_destroy_compress_cache(void);
+struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
+void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+						nid_t ino, block_t blkaddr);
+void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+								block_t blkaddr);
+void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
 #else
 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -3913,10 +3933,20 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
 }
 static inline int f2fs_init_compress_mempool(void) { return 0; }
 static inline void f2fs_destroy_compress_mempool(void) { }
+static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
 static inline int __init f2fs_init_compress_cache(void) { return 0; }
 static inline void f2fs_destroy_compress_cache(void) { }
+static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
+				block_t blkaddr) { }
+static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+				struct page *page, nid_t ino, block_t blkaddr) { }
+static inline void f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
+				struct page *page, block_t blkaddr) { }
+static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
+							nid_t ino) { }
 #endif
 
 static inline void set_compress_context(struct inode *inode)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3ef84e6ded41..43919a3ae6a6 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1225,6 +1225,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	f2fs_put_page(mpage, 1);
 	invalidate_mapping_pages(META_MAPPING(fio.sbi),
 				fio.old_blkaddr, fio.old_blkaddr);
+	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
 
 	set_page_dirty(fio.encrypted_page);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 349d9cb933ee..f030b9b79202 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -18,6 +18,10 @@
 
 #include <trace/events/f2fs.h>
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+extern const struct address_space_operations f2fs_compress_aops;
+#endif
+
 void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
 {
 	if (is_inode_flag_set(inode, FI_NEW_INODE))
@@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
 	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
 		goto make_now;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (ino == F2FS_COMPRESS_INO(sbi))
+		goto make_now;
+#endif
+
 	ret = do_read_inode(inode);
 	if (ret)
 		goto bad_inode;
@@ -504,6 +513,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
 	} else if (ino == F2FS_META_INO(sbi)) {
 		inode->i_mapping->a_ops = &f2fs_meta_aops;
 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		inode->i_mapping->a_ops = &f2fs_compress_aops;
+#endif
+		mapping_set_gfp_mask(inode->i_mapping,
+			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
 	} else if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &f2fs_file_inode_operations;
 		inode->i_fop = &f2fs_file_operations;
@@ -722,8 +737,12 @@ void f2fs_evict_inode(struct inode *inode)
 	trace_f2fs_evict_inode(inode);
 	truncate_inode_pages_final(&inode->i_data);
 
+	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
+		f2fs_invalidate_compress_pages(sbi, inode->i_ino);
+
 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
-			inode->i_ino == F2FS_META_INO(sbi))
+			inode->i_ino == F2FS_META_INO(sbi) ||
+			inode->i_ino == F2FS_COMPRESS_INO(sbi))
 		goto out_clear;
 
 	f2fs_bug_on(sbi, get_dirty_pages(inode));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index deca74cb17df..d8570b0359f5 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2305,6 +2305,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
 		return;
 
 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+	f2fs_invalidate_compress_page(sbi, addr);
 
 	/* add it into sit main buffer */
 	down_write(&sit_i->sentry_lock);
@@ -3432,9 +3433,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 reallocate:
 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio);
-	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
 					fio->old_blkaddr, fio->old_blkaddr);
+		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
+	}
 
 	/* writeout dirty page into bdev */
 	f2fs_submit_page_write(fio);
@@ -3607,6 +3610,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
 		invalidate_mapping_pages(META_MAPPING(sbi),
 					old_blkaddr, old_blkaddr);
+		f2fs_invalidate_compress_page(sbi, old_blkaddr);
 		if (!from_gc)
 			update_segment_mtime(sbi, old_blkaddr, 0);
 		update_sit_entry(sbi, old_blkaddr, -1);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8a82721b69ef..50e749169841 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -148,6 +148,7 @@ enum {
 	Opt_compress_extension,
 	Opt_compress_chksum,
 	Opt_compress_mode,
+	Opt_compress_cache,
 	Opt_atgc,
 	Opt_err,
 };
@@ -218,6 +219,7 @@ static match_table_t f2fs_tokens = {
 	{Opt_compress_extension, "compress_extension=%s"},
 	{Opt_compress_chksum, "compress_chksum"},
 	{Opt_compress_mode, "compress_mode=%s"},
+	{Opt_compress_cache, "compress_cache"},
 	{Opt_atgc, "atgc"},
 	{Opt_err, NULL},
 };
@@ -955,12 +957,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			}
 			kfree(name);
 			break;
+		case Opt_compress_cache:
+			set_opt(sbi, COMPRESS_CACHE);
+			break;
 #else
 		case Opt_compress_algorithm:
 		case Opt_compress_log_size:
 		case Opt_compress_extension:
 		case Opt_compress_chksum:
 		case Opt_compress_mode:
+		case Opt_compress_cache:
 			f2fs_info(sbi, "compression options not supported");
 			break;
 #endif
@@ -1285,6 +1291,8 @@ static void f2fs_put_super(struct super_block *sb)
 
 	f2fs_bug_on(sbi, sbi->fsync_node_num);
 
+	f2fs_destroy_compress_inode(sbi);
+
 	iput(sbi->node_inode);
 	sbi->node_inode = NULL;
 
@@ -1554,6 +1562,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 		seq_printf(seq, ",compress_mode=%s", "fs");
 	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
 		seq_printf(seq, ",compress_mode=%s", "user");
+
+	if (test_opt(sbi, COMPRESS_CACHE))
+		seq_puts(seq, ",compress_cache");
 }
 
 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
@@ -3759,10 +3770,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		goto free_node_inode;
 	}
 
-	err = f2fs_register_sysfs(sbi);
+	err = f2fs_init_compress_inode(sbi);
 	if (err)
 		goto free_root_inode;
 
+	err = f2fs_register_sysfs(sbi);
+	if (err)
+		goto free_compress_inode;
+
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -3896,6 +3911,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	/* evict some inodes being cached by GC */
 	evict_inodes(sb);
 	f2fs_unregister_sysfs(sbi);
+free_compress_inode:
+	f2fs_destroy_compress_inode(sbi);
 free_root_inode:
 	dput(sb->s_root);
 	sb->s_root = NULL;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 7dc2a06cf19a..55be7afeee90 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -34,6 +34,7 @@
 #define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
 #define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
 #define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi)	(NM_I(sbi)->max_nid)
 
 #define F2FS_MAX_QUOTAS		3
 
-- 
2.29.2



* [PATCH RESEND v2 2/5] f2fs: compress: support compress level
  2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
@ 2020-12-09  8:43 ` Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 3/5] f2fs: compress: deny setting unsupported compress algorithm Chao Yu
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Chao Yu @ 2020-12-09  8:43 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Expand the 'compress_algorithm' mount option to accept a parameter in the
format <algorithm>:<level>. This gives users a way to configure the lz4 and
zstd compression levels explicitly, so that f2fs compression can achieve a
higher compression ratio.
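
For illustration only (plain userspace C, not kernel code; parse_level_opt()
is a made-up name), the "<algorithm>:<level>" token splits into a name and a
numeric level, much like f2fs_compress_set_level() in this patch does with
kstrtouint():

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* split "zstd:6" into "zstd" and 6; a bare "lz4" keeps level 0 (default) */
	static void parse_level_opt(const char *opt, char *alg, size_t alg_sz,
				    unsigned int *level)
	{
		const char *sep = strchr(opt, ':');

		if (!sep) {
			snprintf(alg, alg_sz, "%s", opt);
			*level = 0;
			return;
		}
		snprintf(alg, alg_sz, "%.*s", (int)(sep - opt), opt);
		*level = (unsigned int)strtoul(sep + 1, NULL, 10);
	}

	int main(void)
	{
		char alg[16];
		unsigned int level;

		parse_level_opt("zstd:6", alg, sizeof(alg), &level);
		printf("%s, level %u\n", alg, level);	/* prints "zstd, level 6" */
		return 0;
	}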

To use a compression level with the lz4 algorithm, the kernel needs
CONFIG_LZ4HC_COMPRESS and CONFIG_F2FS_FS_LZ4HC enabled so that the lz4hc
compression algorithm is available.

Compression ratio (CR) and performance numbers for the lz4/lz4hc algorithms:

dd if=enwik9 of=compressed_file conv=fsync

Original blocks:	244382

			lz4			lz4hc-9
compressed blocks	170647			163270
compress ratio		69.8%			66.8%
speed			16.4207 s, 60.9 MB/s	26.7299 s, 37.4 MB/s

compress ratio = after / before
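For example, for lz4 the ratio is 170647 / 244382 ~ 69.8% and for lz4hc-9 it
is 163270 / 244382 ~ 66.8%, so a lower value means better compression.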

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 Documentation/filesystems/f2fs.rst |  5 +++
 fs/f2fs/Kconfig                    | 10 +++++
 fs/f2fs/compress.c                 | 41 +++++++++++++++--
 fs/f2fs/f2fs.h                     |  9 ++++
 fs/f2fs/super.c                    | 71 +++++++++++++++++++++++++++++-
 include/linux/f2fs_fs.h            |  3 ++
 6 files changed, 134 insertions(+), 5 deletions(-)

diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 5fa45fd8e4af..cd1e5b826ba3 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -249,6 +249,11 @@ checkpoint=%s[:%u[%]]	 Set to "disable" to turn off checkpointing. Set to "enabl
 			 This space is reclaimed once checkpoint=enable.
 compress_algorithm=%s	 Control compress algorithm, currently f2fs supports "lzo",
 			 "lz4", "zstd" and "lzo-rle" algorithm.
+compress_algorithm=%s:%d Control compress algorithm and its compress level, now, only
+			 "lz4" and "zstd" support compress level config.
+			 algorithm	level range
+			 lz4		3 - 16
+			 zstd		1 - 22
 compress_log_size=%u	 Support configuring compress cluster size, the size will
 			 be 4KB * (1 << %u), 16KB is minimum size, also it's
 			 default size.
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index d13c5c6a9787..63c1fc1a0e3b 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -119,6 +119,16 @@ config F2FS_FS_LZ4
 	help
 	  Support LZ4 compress algorithm, if unsure, say Y.
 
+config F2FS_FS_LZ4HC
+	bool "LZ4HC compression support"
+	depends on F2FS_FS_COMPRESSION
+	depends on F2FS_FS_LZ4
+	select LZ4HC_COMPRESS
+	default y
+	help
+	  Support LZ4HC compress algorithm, LZ4HC has compatible on-disk
+	  layout with LZ4, if unsure, say Y.
+
 config F2FS_FS_ZSTD
 	bool "ZSTD compression support"
 	depends on F2FS_FS_COMPRESSION
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 446dd41a7bad..8840f5f41bf1 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -254,8 +254,14 @@ static const struct f2fs_compress_ops f2fs_lzo_ops = {
 #ifdef CONFIG_F2FS_FS_LZ4
 static int lz4_init_compress_ctx(struct compress_ctx *cc)
 {
-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
-				LZ4_MEM_COMPRESS, GFP_NOFS);
+	unsigned int size = LZ4_MEM_COMPRESS;
+
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
+		size = LZ4HC_MEM_COMPRESS;
+#endif
+
+	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
 	if (!cc->private)
 		return -ENOMEM;
 
@@ -274,10 +280,34 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 	cc->private = NULL;
 }
 
+#ifdef CONFIG_F2FS_FS_LZ4HC
+static int lz4hc_compress_pages(struct compress_ctx *cc)
+{
+	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+						COMPRESS_LEVEL_OFFSET;
+	int len;
+
+	if (level)
+		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+					cc->clen, level, cc->private);
+	else
+		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+						cc->clen, cc->private);
+	if (!len)
+		return -EAGAIN;
+
+	cc->clen = len;
+	return 0;
+}
+#endif
+
 static int lz4_compress_pages(struct compress_ctx *cc)
 {
 	int len;
 
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	return lz4hc_compress_pages(cc);
+#endif
 	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 						cc->clen, cc->private);
 	if (!len)
@@ -327,8 +357,13 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
 	ZSTD_CStream *stream;
 	void *workspace;
 	unsigned int workspace_size;
+	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+						COMPRESS_LEVEL_OFFSET;
+
+	if (!level)
+		level = F2FS_ZSTD_DEFAULT_CLEVEL;
 
-	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
+	params = ZSTD_getParams(level, cc->rlen, 0);
 	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
 
 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0ff8b18eda05..fbaef39e51df 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -147,6 +147,7 @@ struct f2fs_mount_info {
 	/* For compression */
 	unsigned char compress_algorithm;	/* algorithm type */
 	unsigned char compress_log_size;	/* cluster log size */
+	unsigned char compress_level;		/* compress level */
 	bool compress_chksum;			/* compressed data chksum */
 	unsigned char compress_ext_cnt;		/* extension count */
 	int compress_mode;			/* compression mode */
@@ -736,6 +737,7 @@ struct f2fs_inode_info {
 	atomic_t i_compr_blocks;		/* # of compressed blocks */
 	unsigned char i_compress_algorithm;	/* algorithm type */
 	unsigned char i_log_cluster_size;	/* log of cluster size */
+	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
 	unsigned short i_compress_flag;		/* compress flag */
 	unsigned int i_cluster_size;		/* cluster size */
 };
@@ -1314,6 +1316,8 @@ struct compress_data {
 
 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
 
+#define	COMPRESS_LEVEL_OFFSET	8
+
 /* compress context */
 struct compress_ctx {
 	struct inode *inode;		/* inode the context belong to */
@@ -3962,6 +3966,11 @@ static inline void set_compress_context(struct inode *inode)
 				1 << COMPRESS_CHKSUM : 0;
 	F2FS_I(inode)->i_cluster_size =
 			1 << F2FS_I(inode)->i_log_cluster_size;
+	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
+			F2FS_OPTION(sbi).compress_level)
+		F2FS_I(inode)->i_compress_flag |=
+				F2FS_OPTION(sbi).compress_level <<
+				COMPRESS_LEVEL_OFFSET;
 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
 	set_inode_flag(inode, FI_COMPRESSED_FILE);
 	stat_inc_compr_inode(inode);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 50e749169841..7c5c880a97be 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -25,6 +25,8 @@
 #include <linux/quota.h>
 #include <linux/unicode.h>
 #include <linux/part_stat.h>
+#include <linux/zstd.h>
+#include <linux/lz4.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -466,6 +468,56 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
 	return 0;
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+static int f2fs_compress_set_level(struct f2fs_sb_info *sbi, const char *str,
+						int type)
+{
+	unsigned int level;
+	int len;
+
+	if (type == COMPRESS_LZ4)
+		len = 3;
+	else if (type == COMPRESS_ZSTD)
+		len = 4;
+	else
+		return 0;
+
+	if (strlen(str) == len)
+		return 0;
+
+	str += len;
+
+	if (str[0] != ':') {
+		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+		return -EINVAL;
+	}
+	if (kstrtouint(str + 1, 10, &level))
+		return -EINVAL;
+	if (type == COMPRESS_LZ4) {
+#ifdef CONFIG_F2FS_FS_LZ4HC
+		if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
+			f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+			return -EINVAL;
+		}
+#else
+		f2fs_info(sbi, "doesn't support lz4hc compression");
+		return 0;
+#endif
+	} else if (type == COMPRESS_ZSTD) {
+#ifdef CONFIG_F2FS_FS_ZSTD
+		if (!level || level > ZSTD_maxCLevel()) {
+			f2fs_info(sbi, "invalid zstd compress level: %d", level);
+			return -EINVAL;
+		}
+#else
+		f2fs_info(sbi, "doesn't support zstd compression");
+#endif
+	}
+	F2FS_OPTION(sbi).compress_level = level;
+	return 0;
+}
+#endif
+
 static int parse_options(struct super_block *sb, char *options, bool is_remount)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -886,10 +938,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			if (!strcmp(name, "lzo")) {
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZO;
-			} else if (!strcmp(name, "lz4")) {
+			} else if (!strncmp(name, "lz4", 3)) {
+				ret = f2fs_compress_set_level(sbi, name,
+								COMPRESS_LZ4);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZ4;
-			} else if (!strcmp(name, "zstd")) {
+			} else if (!strncmp(name, "zstd", 4)) {
+				ret = f2fs_compress_set_level(sbi, name,
+								COMPRESS_ZSTD);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_ZSTD;
 			} else if (!strcmp(name, "lzo-rle")) {
@@ -1547,6 +1611,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 	}
 	seq_printf(seq, ",compress_algorithm=%s", algtype);
 
+	if (F2FS_OPTION(sbi).compress_level)
+		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
+
 	seq_printf(seq, ",compress_log_size=%u",
 			F2FS_OPTION(sbi).compress_log_size);
 
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 55be7afeee90..2dcc63fe8494 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -275,6 +275,9 @@ struct f2fs_inode {
 			__u8 i_compress_algorithm;	/* compress algorithm */
 			__u8 i_log_cluster_size;	/* log of cluster size */
 			__le16 i_compress_flag;		/* compress flag */
+						/* 0 bit: chksum flag
+						 * [10,15] bits: compress level
+						 */
 			__le32 i_extra_end[0];	/* for attribute size calculation */
 		} __packed;
 		__le32 i_addr[DEF_ADDRS_PER_INODE];	/* Pointers to data blocks */
-- 
2.29.2



* [PATCH RESEND v2 3/5] f2fs: compress: deny setting unsupported compress algorithm
  2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 2/5] f2fs: compress: support compress level Chao Yu
@ 2020-12-09  8:43 ` Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 4/5] f2fs: introduce a new per-sb directory in sysfs Chao Yu
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Chao Yu @ 2020-12-09  8:43 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

If the kernel doesn't support a given compression algorithm, refuse to set it
as the f2fs compression algorithm via the 'compress_algorithm=%s' mount option.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/super.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7c5c880a97be..fe077ef88768 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -936,9 +936,14 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			if (!name)
 				return -ENOMEM;
 			if (!strcmp(name, "lzo")) {
+#ifdef CONFIG_F2FS_FS_LZO
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZO;
+#else
+				f2fs_info(sbi, "kernel doesn't support lzo compression");
+#endif
 			} else if (!strncmp(name, "lz4", 3)) {
+#ifdef CONFIG_F2FS_FS_LZ4
 				ret = f2fs_compress_set_level(sbi, name,
 								COMPRESS_LZ4);
 				if (ret) {
@@ -947,7 +952,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZ4;
+#else
+				f2fs_info(sbi, "kernel doesn't support lz4 compression");
+#endif
 			} else if (!strncmp(name, "zstd", 4)) {
+#ifdef CONFIG_F2FS_FS_ZSTD
 				ret = f2fs_compress_set_level(sbi, name,
 								COMPRESS_ZSTD);
 				if (ret) {
@@ -956,9 +965,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_ZSTD;
+#else
+				f2fs_info(sbi, "kernel doesn't support zstd compression");
+#endif
 			} else if (!strcmp(name, "lzo-rle")) {
+#ifdef CONFIG_F2FS_FS_LZORLE
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZORLE;
+#else
+				f2fs_info(sbi, "kernel doesn't support lzorle compression");
+#endif
 			} else {
 				kfree(name);
 				return -EINVAL;
-- 
2.29.2



* [PATCH RESEND v2 4/5] f2fs: introduce a new per-sb directory in sysfs
  2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 2/5] f2fs: compress: support compress level Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 3/5] f2fs: compress: deny setting unsupported compress algorithm Chao Yu
@ 2020-12-09  8:43 ` Chao Yu
  2020-12-09  8:43 ` [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node Chao Yu
  2020-12-10  1:56 ` [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
  4 siblings, 0 replies; 11+ messages in thread
From: Chao Yu @ 2020-12-09  8:43 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Add a new directory 'stat' under /sys/fs/f2fs/<devname>/. Later we can add
new read-only stat sysfs files into this directory, which keeps the <devname>
directory less cluttered.
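
As a rough sketch of how an entry gets hooked into the new directory (the
attribute name 'example' and the value shown are made up for illustration;
patch 5/5 in this series adds the real 'sb_status' entry in the same way), a
read-only file would be added to the f2fs_stat_attrs[] array introduced below:

	/* sketch: a read-only file at /sys/fs/f2fs/<devname>/stat/example */
	static ssize_t example_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
	{
		return sprintf(buf, "%d\n", is_sbi_flag_set(sbi, SBI_IS_DIRTY));
	}

	F2FS_GENERAL_RO_ATTR(example);

	static struct attribute *f2fs_stat_attrs[] = {
		ATTR_LIST(example),
		NULL,
	};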

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/f2fs.h  |  5 +++-
 fs/f2fs/sysfs.c | 69 +++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 68 insertions(+), 6 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index fbaef39e51df..cb94f650ec3d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1549,9 +1549,12 @@ struct f2fs_sb_info {
 	unsigned int node_io_flag;
 
 	/* For sysfs suppport */
-	struct kobject s_kobj;
+	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
 	struct completion s_kobj_unregister;
 
+	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
+	struct completion s_stat_kobj_unregister;
+
 	/* For shrinker support */
 	struct list_head s_list;
 	int s_ndevs;				/* number of devices */
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 989a649cfa8b..ebca0b4961e8 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -711,6 +711,11 @@ static struct attribute *f2fs_feat_attrs[] = {
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
 
+static struct attribute *f2fs_stat_attrs[] = {
+	NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_stat);
+
 static const struct sysfs_ops f2fs_attr_ops = {
 	.show	= f2fs_attr_show,
 	.store	= f2fs_attr_store,
@@ -739,6 +744,44 @@ static struct kobject f2fs_feat = {
 	.kset	= &f2fs_kset,
 };
 
+static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+	return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_stat_attr_store(struct kobject *kobj, struct attribute *attr,
+						const char *buf, size_t len)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+	return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_stat_kobj_release(struct kobject *kobj)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	complete(&sbi->s_stat_kobj_unregister);
+}
+
+static const struct sysfs_ops f2fs_stat_attr_ops = {
+	.show	= f2fs_stat_attr_show,
+	.store	= f2fs_stat_attr_store,
+};
+
+static struct kobj_type f2fs_stat_ktype = {
+	.default_groups = f2fs_stat_groups,
+	.sysfs_ops	= &f2fs_stat_attr_ops,
+	.release	= f2fs_stat_kobj_release,
+};
+
 static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
 						void *offset)
 {
@@ -945,11 +988,15 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 	init_completion(&sbi->s_kobj_unregister);
 	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
 				"%s", sb->s_id);
-	if (err) {
-		kobject_put(&sbi->s_kobj);
-		wait_for_completion(&sbi->s_kobj_unregister);
-		return err;
-	}
+	if (err)
+		goto put_sb_kobj;
+
+	sbi->s_stat_kobj.kset = &f2fs_kset;
+	init_completion(&sbi->s_stat_kobj_unregister);
+	err = kobject_init_and_add(&sbi->s_stat_kobj, &f2fs_stat_ktype,
+						&sbi->s_kobj, "stat");
+	if (err)
+		goto put_stat_kobj;
 
 	if (f2fs_proc_root)
 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -965,6 +1012,13 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 				victim_bits_seq_show, sb);
 	}
 	return 0;
+put_stat_kobj:
+	kobject_put(&sbi->s_stat_kobj);
+	wait_for_completion(&sbi->s_stat_kobj_unregister);
+put_sb_kobj:
+	kobject_put(&sbi->s_kobj);
+	wait_for_completion(&sbi->s_kobj_unregister);
+	return err;
 }
 
 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
@@ -976,6 +1030,11 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
 		remove_proc_entry("victim_bits", sbi->s_proc);
 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
 	}
+
+	kobject_del(&sbi->s_stat_kobj);
+	kobject_put(&sbi->s_stat_kobj);
+	wait_for_completion(&sbi->s_stat_kobj_unregister);
+
 	kobject_del(&sbi->s_kobj);
 	kobject_put(&sbi->s_kobj);
 	wait_for_completion(&sbi->s_kobj_unregister);
-- 
2.29.2



* [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node
  2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
                   ` (2 preceding siblings ...)
  2020-12-09  8:43 ` [PATCH RESEND v2 4/5] f2fs: introduce a new per-sb directory in sysfs Chao Yu
@ 2020-12-09  8:43 ` Chao Yu
  2020-12-09 16:04   ` Jaegeuk Kim
  2020-12-10  1:56 ` [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
  4 siblings, 1 reply; 11+ messages in thread
From: Chao Yu @ 2020-12-09  8:43 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Introduce /sys/fs/f2fs/<devname>/stat/sb_status to show superblock
status in real time as below:

IS_DIRTY:		no
IS_CLOSE:		no
IS_SHUTDOWN:		no
IS_RECOVERED:		no
IS_RESIZEFS:		no
NEED_FSCK:		no
POR_DOING:		no
NEED_SB_WRITE:		no
NEED_CP:		no
CP_DISABLED:		no
CP_DISABLED_QUICK:	no
QUOTA_NEED_FLUSH:	no
QUOTA_SKIP_FLUSH:	no
QUOTA_NEED_REPAIR:	no

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 Documentation/ABI/testing/sysfs-fs-f2fs |  5 ++++
 fs/f2fs/sysfs.c                         | 36 +++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3dfee94e0618..57ab839dc3a2 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -377,3 +377,8 @@ Description:	This gives a control to limit the bio size in f2fs.
 		Default is zero, which will follow underlying block layer limit,
 		whereas, if it has a certain bytes value, f2fs won't submit a
 		bio larger than that size.
+
+What:		/sys/fs/f2fs/<disk>/stat/sb_status
+Date:		December 2020
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:	Show status of f2fs superblock in real time.
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index ebca0b4961e8..1b85e6d16a94 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -101,6 +101,40 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
 				sbi->sectors_written_start) >> 1)));
 }
 
+#define	SB_STATUS(s)	(s ? "yes" : "no")
+static ssize_t sb_status_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+	return sprintf(buf, "IS_DIRTY:		%s\n"
+				"IS_CLOSE:		%s\n"
+				"IS_SHUTDOWN:		%s\n"
+				"IS_RECOVERED:		%s\n"
+				"IS_RESIZEFS:		%s\n"
+				"NEED_FSCK:		%s\n"
+				"POR_DOING:		%s\n"
+				"NEED_SB_WRITE:		%s\n"
+				"NEED_CP:		%s\n"
+				"CP_DISABLED:		%s\n"
+				"CP_DISABLED_QUICK:	%s\n"
+				"QUOTA_NEED_FLUSH:	%s\n"
+				"QUOTA_SKIP_FLUSH:	%s\n"
+				"QUOTA_NEED_REPAIR:	%s\n",
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_DIRTY)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_CLOSE)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RECOVERED)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_FSCK)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_POR_DOING)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_CP)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)),
+			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)));
+}
+
 static ssize_t features_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
@@ -711,7 +745,9 @@ static struct attribute *f2fs_feat_attrs[] = {
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
 
+F2FS_GENERAL_RO_ATTR(sb_status);
 static struct attribute *f2fs_stat_attrs[] = {
+	ATTR_LIST(sb_status),
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs_stat);
-- 
2.29.2



* Re: [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node
  2020-12-09  8:43 ` [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node Chao Yu
@ 2020-12-09 16:04   ` Jaegeuk Kim
  2020-12-10  1:40     ` Chao Yu
  0 siblings, 1 reply; 11+ messages in thread
From: Jaegeuk Kim @ 2020-12-09 16:04 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, chao

On 12/09, Chao Yu wrote:
> Introduce /sys/fs/f2fs/<devname>/stat/sb_status to show superblock
> status in real time as below:
> 
> IS_DIRTY:		no
> IS_CLOSE:		no
> IS_SHUTDOWN:		no
> IS_RECOVERED:		no
> IS_RESIZEFS:		no
> NEED_FSCK:		no
> POR_DOING:		no
> NEED_SB_WRITE:		no
> NEED_CP:		no
> CP_DISABLED:		no
> CP_DISABLED_QUICK:	no
> QUOTA_NEED_FLUSH:	no
> QUOTA_SKIP_FLUSH:	no
> QUOTA_NEED_REPAIR:	no

Wait, this is breaking a sysfs rule where one entry should show one value.

> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  Documentation/ABI/testing/sysfs-fs-f2fs |  5 ++++
>  fs/f2fs/sysfs.c                         | 36 +++++++++++++++++++++++++
>  2 files changed, 41 insertions(+)
> 
> diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
> index 3dfee94e0618..57ab839dc3a2 100644
> --- a/Documentation/ABI/testing/sysfs-fs-f2fs
> +++ b/Documentation/ABI/testing/sysfs-fs-f2fs
> @@ -377,3 +377,8 @@ Description:	This gives a control to limit the bio size in f2fs.
>  		Default is zero, which will follow underlying block layer limit,
>  		whereas, if it has a certain bytes value, f2fs won't submit a
>  		bio larger than that size.
> +
> +What:		/sys/fs/f2fs/<disk>/stat/sb_status
> +Date:		December 2020
> +Contact:	"Chao Yu" <yuchao0@huawei.com>
> +Description:	Show status of f2fs superblock in real time.
> diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
> index ebca0b4961e8..1b85e6d16a94 100644
> --- a/fs/f2fs/sysfs.c
> +++ b/fs/f2fs/sysfs.c
> @@ -101,6 +101,40 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
>  				sbi->sectors_written_start) >> 1)));
>  }
>  
> +#define	SB_STATUS(s)	(s ? "yes" : "no")
> +static ssize_t sb_status_show(struct f2fs_attr *a,
> +		struct f2fs_sb_info *sbi, char *buf)
> +{
> +	return sprintf(buf, "IS_DIRTY:		%s\n"
> +				"IS_CLOSE:		%s\n"
> +				"IS_SHUTDOWN:		%s\n"
> +				"IS_RECOVERED:		%s\n"
> +				"IS_RESIZEFS:		%s\n"
> +				"NEED_FSCK:		%s\n"
> +				"POR_DOING:		%s\n"
> +				"NEED_SB_WRITE:		%s\n"
> +				"NEED_CP:		%s\n"
> +				"CP_DISABLED:		%s\n"
> +				"CP_DISABLED_QUICK:	%s\n"
> +				"QUOTA_NEED_FLUSH:	%s\n"
> +				"QUOTA_SKIP_FLUSH:	%s\n"
> +				"QUOTA_NEED_REPAIR:	%s\n",
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_DIRTY)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_CLOSE)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RECOVERED)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_FSCK)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_POR_DOING)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_CP)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)),
> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)));
> +}
> +
>  static ssize_t features_show(struct f2fs_attr *a,
>  		struct f2fs_sb_info *sbi, char *buf)
>  {
> @@ -711,7 +745,9 @@ static struct attribute *f2fs_feat_attrs[] = {
>  };
>  ATTRIBUTE_GROUPS(f2fs_feat);
>  
> +F2FS_GENERAL_RO_ATTR(sb_status);
>  static struct attribute *f2fs_stat_attrs[] = {
> +	ATTR_LIST(sb_status),
>  	NULL,
>  };
>  ATTRIBUTE_GROUPS(f2fs_stat);
> -- 
> 2.29.2


* Re: [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node
  2020-12-09 16:04   ` Jaegeuk Kim
@ 2020-12-10  1:40     ` Chao Yu
  0 siblings, 0 replies; 11+ messages in thread
From: Chao Yu @ 2020-12-10  1:40 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, chao

On 2020/12/10 0:04, Jaegeuk Kim wrote:
> On 12/09, Chao Yu wrote:
>> Introduce /sys/fs/f2fs/<devname>/stat/sb_status to show superblock
>> status in real time as below:
>>
>> IS_DIRTY:		no
>> IS_CLOSE:		no
>> IS_SHUTDOWN:		no
>> IS_RECOVERED:		no
>> IS_RESIZEFS:		no
>> NEED_FSCK:		no
>> POR_DOING:		no
>> NEED_SB_WRITE:		no
>> NEED_CP:		no
>> CP_DISABLED:		no
>> CP_DISABLED_QUICK:	no
>> QUOTA_NEED_FLUSH:	no
>> QUOTA_SKIP_FLUSH:	no
>> QUOTA_NEED_REPAIR:	no
> 
> Wait, this is breaking a sysfs rule where one entry should show one value.

Hmm.. let me change it to show the sb status value directly, but then we need
an extra tool or a mapping table to decode what each bit of the status value means.
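
Not a real patch, just a sketch of that direction (assuming sbi->s_flag is the
flag word that is_sbi_flag_set() tests); userspace would then need a table of
the SBI_* bit positions to decode it:

	static ssize_t sb_status_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
	{
		/* dump the raw flag word; each bit maps to one SBI_* flag */
		return sprintf(buf, "%lx\n", sbi->s_flag);
	}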

Thanks,

> 
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>   Documentation/ABI/testing/sysfs-fs-f2fs |  5 ++++
>>   fs/f2fs/sysfs.c                         | 36 +++++++++++++++++++++++++
>>   2 files changed, 41 insertions(+)
>>
>> diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
>> index 3dfee94e0618..57ab839dc3a2 100644
>> --- a/Documentation/ABI/testing/sysfs-fs-f2fs
>> +++ b/Documentation/ABI/testing/sysfs-fs-f2fs
>> @@ -377,3 +377,8 @@ Description:	This gives a control to limit the bio size in f2fs.
>>   		Default is zero, which will follow underlying block layer limit,
>>   		whereas, if it has a certain bytes value, f2fs won't submit a
>>   		bio larger than that size.
>> +
>> +What:		/sys/fs/f2fs/<disk>/stat/sb_status
>> +Date:		December 2020
>> +Contact:	"Chao Yu" <yuchao0@huawei.com>
>> +Description:	Show status of f2fs superblock in real time.
>> diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
>> index ebca0b4961e8..1b85e6d16a94 100644
>> --- a/fs/f2fs/sysfs.c
>> +++ b/fs/f2fs/sysfs.c
>> @@ -101,6 +101,40 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
>>   				sbi->sectors_written_start) >> 1)));
>>   }
>>   
>> +#define	SB_STATUS(s)	(s ? "yes" : "no")
>> +static ssize_t sb_status_show(struct f2fs_attr *a,
>> +		struct f2fs_sb_info *sbi, char *buf)
>> +{
>> +	return sprintf(buf, "IS_DIRTY:		%s\n"
>> +				"IS_CLOSE:		%s\n"
>> +				"IS_SHUTDOWN:		%s\n"
>> +				"IS_RECOVERED:		%s\n"
>> +				"IS_RESIZEFS:		%s\n"
>> +				"NEED_FSCK:		%s\n"
>> +				"POR_DOING:		%s\n"
>> +				"NEED_SB_WRITE:		%s\n"
>> +				"NEED_CP:		%s\n"
>> +				"CP_DISABLED:		%s\n"
>> +				"CP_DISABLED_QUICK:	%s\n"
>> +				"QUOTA_NEED_FLUSH:	%s\n"
>> +				"QUOTA_SKIP_FLUSH:	%s\n"
>> +				"QUOTA_NEED_REPAIR:	%s\n",
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_DIRTY)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_CLOSE)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RECOVERED)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_FSCK)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_POR_DOING)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_NEED_CP)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)),
>> +			SB_STATUS(is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)));
>> +}
>> +
>>   static ssize_t features_show(struct f2fs_attr *a,
>>   		struct f2fs_sb_info *sbi, char *buf)
>>   {
>> @@ -711,7 +745,9 @@ static struct attribute *f2fs_feat_attrs[] = {
>>   };
>>   ATTRIBUTE_GROUPS(f2fs_feat);
>>   
>> +F2FS_GENERAL_RO_ATTR(sb_status);
>>   static struct attribute *f2fs_stat_attrs[] = {
>> +	ATTR_LIST(sb_status),
>>   	NULL,
>>   };
>>   ATTRIBUTE_GROUPS(f2fs_stat);
>> -- 
>> 2.29.2
> .
> 


* Re: [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks
  2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
                   ` (3 preceding siblings ...)
  2020-12-09  8:43 ` [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node Chao Yu
@ 2020-12-10  1:56 ` Chao Yu
  2020-12-10  2:15   ` Jaegeuk Kim
  4 siblings, 1 reply; 11+ messages in thread
From: Chao Yu @ 2020-12-10  1:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao

Hi Daeho, Jaegeuk

I found one place in this patch that was missed when adapting the
"compress vs verity race" bugfix.

Could you please check and apply the diff below?

 From 61a9812944ac2f6f64fb458d5ef8b662c007bc50 Mon Sep 17 00:00:00 2001
From: Chao Yu <yuchao0@huawei.com>
Date: Thu, 10 Dec 2020 09:52:42 +0800
Subject: [PATCH] fix

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
  fs/f2fs/data.c | 7 ++-----
  1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6787a7a03e86..894c5680db4a 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2271,11 +2271,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
  		f2fs_load_compressed_page(sbi, page, blkaddr);
  		if (PageUptodate(page)) {
  			if (!atomic_dec_return(&dic->pending_pages)) {
-				bool verity =
-					f2fs_need_verity(inode, start_idx);
-
-				f2fs_do_decompress_pages(dic, verity);
-				if (verity) {
+				f2fs_do_decompress_pages(dic, for_verity);
+				if (for_verity) {
  					f2fs_verify_pages(dic->rpages,
  							dic->cluster_size);
  					f2fs_free_dic(dic);
-- 
2.29.2

Thanks,

On 2020/12/9 16:43, Chao Yu wrote:
> Support using the address space of an internal inode to cache compressed blocks,
> in order to improve the cache hit ratio of random reads.
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>   Documentation/filesystems/f2fs.rst |   3 +
>   fs/f2fs/compress.c                 | 198 +++++++++++++++++++++++++++--
>   fs/f2fs/data.c                     |  29 ++++-
>   fs/f2fs/debug.c                    |  13 ++
>   fs/f2fs/f2fs.h                     |  34 ++++-
>   fs/f2fs/gc.c                       |   1 +
>   fs/f2fs/inode.c                    |  21 ++-
>   fs/f2fs/segment.c                  |   6 +-
>   fs/f2fs/super.c                    |  19 ++-
>   include/linux/f2fs_fs.h            |   1 +
>   10 files changed, 305 insertions(+), 20 deletions(-)
> 
> diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
> index dae15c96e659..5fa45fd8e4af 100644
> --- a/Documentation/filesystems/f2fs.rst
> +++ b/Documentation/filesystems/f2fs.rst
> @@ -268,6 +268,9 @@ compress_mode=%s	 Control file compression mode. This supports "fs" and "user"
>   			 choosing the target file and the timing. The user can do manual
>   			 compression/decompression on the compression enabled files using
>   			 ioctls.
> +compress_cache		 Support to use address space of a filesystem managed inode to
> +			 cache compressed block, in order to improve cache hit ratio of
> +			 random read.
>   inlinecrypt		 When possible, encrypt/decrypt the contents of encrypted
>   			 files using the blk-crypto framework rather than
>   			 filesystem-layer encryption. This allows the use of
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 4bcbacfe3325..446dd41a7bad 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -12,9 +12,11 @@
>   #include <linux/lzo.h>
>   #include <linux/lz4.h>
>   #include <linux/zstd.h>
> +#include <linux/pagevec.h>
>   
>   #include "f2fs.h"
>   #include "node.h"
> +#include "segment.h"
>   #include <trace/events/f2fs.h>
>   
>   static struct kmem_cache *cic_entry_slab;
> @@ -721,25 +723,14 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   	return ret;
>   }
>   
> -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity)
>   {
> -	struct decompress_io_ctx *dic =
> -			(struct decompress_io_ctx *)page_private(page);
> -	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
>   	struct f2fs_inode_info *fi= F2FS_I(dic->inode);
>   	const struct f2fs_compress_ops *cops =
>   			f2fs_cops[fi->i_compress_algorithm];
>   	int ret;
>   	int i;
>   
> -	dec_page_count(sbi, F2FS_RD_DATA);
> -
> -	if (bio->bi_status || PageError(page))
> -		dic->failed = true;
> -
> -	if (atomic_dec_return(&dic->pending_pages))
> -		return;
> -
>   	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
>   				dic->cluster_size, fi->i_compress_algorithm);
>   
> @@ -797,6 +788,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
>   	ret = cops->decompress_pages(dic);
>   
>   	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
> +		struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
>   		u32 provided = le32_to_cpu(dic->cbuf->chksum);
>   		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
>   
> @@ -830,6 +822,30 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
>   		f2fs_free_dic(dic);
>   }
>   
> +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> +						nid_t ino, block_t blkaddr);
> +void f2fs_decompress_pages(struct bio *bio, struct page *page,
> +						bool verity, unsigned int ofs)
> +{
> +	struct decompress_io_ctx *dic =
> +			(struct decompress_io_ctx *)page_private(page);
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> +	block_t blkaddr;
> +
> +	dec_page_count(sbi, F2FS_RD_DATA);
> +
> +	if (bio->bi_status || PageError(page))
> +		dic->failed = true;
> +
> +	blkaddr = SECTOR_TO_BLOCK(bio->bi_iter.bi_sector) + ofs;
> +	f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr);
> +
> +	if (atomic_dec_return(&dic->pending_pages))
> +		return;
> +
> +	f2fs_do_decompress_pages(dic, verity);
> +}
> +
>   static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
>   {
>   	if (cc->cluster_idx == NULL_CLUSTER)
> @@ -1600,6 +1616,164 @@ void f2fs_decompress_end_io(struct page **rpages,
>   	}
>   }
>   
> +const struct address_space_operations f2fs_compress_aops = {
> +	.releasepage = f2fs_release_page,
> +	.invalidatepage = f2fs_invalidate_page,
> +};
> +
> +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
> +{
> +	return sbi->compress_inode->i_mapping;
> +}
> +
> +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
> +{
> +	if (!sbi->compress_inode)
> +		return;
> +	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
> +}
> +
> +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> +						nid_t ino, block_t blkaddr)
> +{
> +	struct page *cpage;
> +	int ret;
> +	struct sysinfo si;
> +	unsigned long free_ram, avail_ram;
> +
> +	if (!test_opt(sbi, COMPRESS_CACHE))
> +		return;
> +
> +	si_meminfo(&si);
> +	free_ram = si.freeram;
> +	avail_ram = si.totalram - si.totalhigh;
> +
> +	/* free memory is lower than watermark, deny caching compress page */
> +	if (free_ram <= sbi->compress_watermark / 100 * avail_ram)
> +		return;
> +
> +	/* cached page count exceed threshold, deny caching compress page */
> +	if (COMPRESS_MAPPING(sbi)->nrpages >=
> +			free_ram / 100 * sbi->compress_percent)
> +		return;
> +
> +	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
> +	if (cpage) {
> +		f2fs_put_page(cpage, 0);
> +		return;
> +	}
> +
> +	cpage = alloc_page(__GFP_IO);
> +	if (!cpage)
> +		return;
> +
> +	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
> +						blkaddr, GFP_NOFS);
> +	if (ret) {
> +		f2fs_put_page(cpage, 0);
> +		return;
> +	}
> +
> +	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
> +	SetPageUptodate(cpage);
> +
> +	f2fs_set_page_private(cpage, ino);
> +
> +	f2fs_put_page(cpage, 1);
> +}
> +
> +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> +								block_t blkaddr)
> +{
> +	struct page *cpage;
> +
> +	if (!test_opt(sbi, COMPRESS_CACHE))
> +		return;
> +
> +	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
> +				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
> +	if (cpage) {
> +		if (PageUptodate(cpage)) {
> +			atomic_inc(&sbi->compress_page_hit);
> +			memcpy(page_address(page),
> +				page_address(cpage), PAGE_SIZE);
> +			SetPageUptodate(page);
> +		}
> +		f2fs_put_page(cpage, 1);
> +	}
> +}
> +
> +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
> +{
> +	struct address_space *mapping = sbi->compress_inode->i_mapping;
> +	struct pagevec pvec;
> +	pgoff_t index = 0;
> +	pgoff_t end = MAX_BLKADDR(sbi);
> +
> +	pagevec_init(&pvec);
> +
> +	do {
> +		unsigned int nr_pages;
> +		int i;
> +
> +		nr_pages = pagevec_lookup_range(&pvec, mapping,
> +						&index, end - 1);
> +		if (!nr_pages)
> +			break;
> +
> +		for (i = 0; i < nr_pages; i++) {
> +			struct page *page = pvec.pages[i];
> +
> +			if (page->index > end)
> +				break;
> +
> +			lock_page(page);
> +			if (page->mapping != mapping) {
> +				unlock_page(page);
> +				continue;
> +			}
> +
> +			if (ino != page_private(page)) {
> +				unlock_page(page);
> +				continue;
> +			}
> +
> +			generic_error_remove_page(mapping, page);
> +			unlock_page(page);
> +		}
> +		pagevec_release(&pvec);
> +		cond_resched();
> +	} while (index < end);
> +}
> +
> +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
> +{
> +	struct inode *inode;
> +
> +	if (!test_opt(sbi, COMPRESS_CACHE))
> +		return 0;
> +
> +	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
> +	if (IS_ERR(inode))
> +		return PTR_ERR(inode);
> +	sbi->compress_inode = inode;
> +
> +	sbi->compress_percent = COMPRESS_PERCENT;
> +	sbi->compress_watermark = COMPRESS_WATERMARK;
> +
> +	atomic_set(&sbi->compress_page_hit, 0);
> +
> +	return 0;
> +}
> +
> +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
> +{
> +	if (!sbi->compress_inode)
> +		return;
> +	iput(sbi->compress_inode);
> +	sbi->compress_inode = NULL;
> +}
> +
>   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
>   {
>   	dev_t dev = sbi->sb->s_bdev->bd_dev;
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index aa34d620bec9..6787a7a03e86 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -133,17 +133,21 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
>   	struct page *page;
>   	struct bio_vec *bv;
>   	struct bvec_iter_all iter_all;
> +	unsigned int ofs = 0;
>   
>   	bio_for_each_segment_all(bv, bio, iter_all) {
>   		page = bv->bv_page;
>   
>   #ifdef CONFIG_F2FS_FS_COMPRESSION
>   		if (compr && f2fs_is_compressed_page(page)) {
> -			f2fs_decompress_pages(bio, page, verity);
> +			f2fs_decompress_pages(bio, page, verity, ofs);
> +			ofs++;
>   			continue;
>   		}
> -		if (verity)
> +		if (verity) {
> +			ofs++;
>   			continue;
> +		}
>   #endif
>   
>   		/* PG_error was set if any post_read step failed */
> @@ -156,6 +160,7 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
>   		}
>   		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
>   		unlock_page(page);
> +		ofs++;
>   	}
>   }
>   
> @@ -1421,9 +1426,11 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
>   	old_blkaddr = dn->data_blkaddr;
>   	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
>   				&sum, seg_type, NULL);
> -	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
> +	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
>   		invalidate_mapping_pages(META_MAPPING(sbi),
>   					old_blkaddr, old_blkaddr);
> +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
> +	}
>   	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
>   
>   	/*
> @@ -2261,6 +2268,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>   		blkaddr = data_blkaddr(dn.inode, dn.node_page,
>   						dn.ofs_in_node + i + 1);
>   
> +		f2fs_load_compressed_page(sbi, page, blkaddr);
> +		if (PageUptodate(page)) {
> +			if (!atomic_dec_return(&dic->pending_pages)) {
> +				bool verity =
> +					f2fs_need_verity(inode, start_idx);
> +
> +				f2fs_do_decompress_pages(dic, verity);
> +				if (verity) {
> +					f2fs_verify_pages(dic->rpages,
> +							dic->cluster_size);
> +					f2fs_free_dic(dic);
> +				}
> +			}
> +			continue;
> +		}
> +
>   		if (bio && (!page_is_mergeable(sbi, bio,
>   					*last_block_in_bio, blkaddr) ||
>   		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
> diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
> index 197c914119da..f1f8714066c5 100644
> --- a/fs/f2fs/debug.c
> +++ b/fs/f2fs/debug.c
> @@ -145,6 +145,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
>   		si->node_pages = NODE_MAPPING(sbi)->nrpages;
>   	if (sbi->meta_inode)
>   		si->meta_pages = META_MAPPING(sbi)->nrpages;
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (sbi->compress_inode) {
> +		si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
> +		si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
> +	}
> +#endif
>   	si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
>   	si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
>   	si->sits = MAIN_SEGS(sbi);
> @@ -299,6 +305,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
>   		unsigned npages = META_MAPPING(sbi)->nrpages;
>   		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
>   	}
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (sbi->compress_inode) {
> +		unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
> +		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
> +	}
> +#endif
>   }
>   
>   static int stat_show(struct seq_file *s, void *v)
> @@ -461,6 +473,7 @@ static int stat_show(struct seq_file *s, void *v)
>   			"volatile IO: %4d (Max. %4d)\n",
>   			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
>   			   si->vw_cnt, si->max_vw_cnt);
> +		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
>   		seq_printf(s, "  - nodes: %4d in %4d\n",
>   			   si->ndirty_node, si->node_pages);
>   		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 7364d453783f..0ff8b18eda05 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
>   #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
>   #define F2FS_MOUNT_NORECOVERY		0x04000000
>   #define F2FS_MOUNT_ATGC			0x08000000
> +#define F2FS_MOUNT_COMPRESS_CACHE	0x10000000
>   
>   #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
>   #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
> @@ -1298,6 +1299,9 @@ enum compress_flag {
>   	COMPRESS_MAX_FLAG,
>   };
>   
> +#define	COMPRESS_WATERMARK			20
> +#define	COMPRESS_PERCENT			20
> +
>   #define COMPRESS_DATA_RESERVED_SIZE		4
>   struct compress_data {
>   	__le32 clen;			/* compressed data size */
> @@ -1571,6 +1575,11 @@ struct f2fs_sb_info {
>   #ifdef CONFIG_F2FS_FS_COMPRESSION
>   	struct kmem_cache *page_array_slab;	/* page array entry */
>   	unsigned int page_array_slab_size;	/* default page array slab size */
> +
> +	struct inode *compress_inode;		/* cache compressed blocks */
> +	unsigned int compress_percent;		/* cache page percentage */
> +	unsigned int compress_watermark;	/* cache page watermark */
> +	atomic_t compress_page_hit;		/* cache hit count */
>   #endif
>   };
>   
> @@ -3536,7 +3545,8 @@ struct f2fs_stat_info {
>   	unsigned int bimodal, avg_vblocks;
>   	int util_free, util_valid, util_invalid;
>   	int rsvd_segs, overp_segs;
> -	int dirty_count, node_pages, meta_pages;
> +	int dirty_count, node_pages, meta_pages, compress_pages;
> +	int compress_page_hit;
>   	int prefree_count, call_count, cp_count, bg_cp_count;
>   	int tot_segs, node_segs, data_segs, free_segs, free_secs;
>   	int bg_node_segs, bg_data_segs;
> @@ -3874,7 +3884,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
>   bool f2fs_is_compress_backend_ready(struct inode *inode);
>   int f2fs_init_compress_mempool(void);
>   void f2fs_destroy_compress_mempool(void);
> -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
> +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity);
> +void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity, unsigned int ofs);
>   bool f2fs_cluster_is_empty(struct compress_ctx *cc);
>   bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
>   void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
> @@ -3893,10 +3904,19 @@ void f2fs_decompress_end_io(struct page **rpages,
>   int f2fs_init_compress_ctx(struct compress_ctx *cc);
>   void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
>   void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
> +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
>   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
>   void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
>   int __init f2fs_init_compress_cache(void);
>   void f2fs_destroy_compress_cache(void);
> +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
> +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
> +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> +						nid_t ino, block_t blkaddr);
> +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> +								block_t blkaddr);
> +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
>   #else
>   static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
>   static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
> @@ -3913,10 +3933,20 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
>   }
>   static inline int f2fs_init_compress_mempool(void) { return 0; }
>   static inline void f2fs_destroy_compress_mempool(void) { }
> +static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
> +static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
>   static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
>   static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
>   static inline int __init f2fs_init_compress_cache(void) { return 0; }
>   static inline void f2fs_destroy_compress_cache(void) { }
> +static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
> +				block_t blkaddr) { }
> +static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
> +				struct page *page, nid_t ino, block_t blkaddr) { }
> +static inline void f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
> +				struct page *page, block_t blkaddr) { }
> +static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
> +							nid_t ino) { }
>   #endif
>   
>   static inline void set_compress_context(struct inode *inode)
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 3ef84e6ded41..43919a3ae6a6 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -1225,6 +1225,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
>   	f2fs_put_page(mpage, 1);
>   	invalidate_mapping_pages(META_MAPPING(fio.sbi),
>   				fio.old_blkaddr, fio.old_blkaddr);
> +	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
>   
>   	set_page_dirty(fio.encrypted_page);
>   	if (clear_page_dirty_for_io(fio.encrypted_page))
> diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> index 349d9cb933ee..f030b9b79202 100644
> --- a/fs/f2fs/inode.c
> +++ b/fs/f2fs/inode.c
> @@ -18,6 +18,10 @@
>   
>   #include <trace/events/f2fs.h>
>   
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +extern const struct address_space_operations f2fs_compress_aops;
> +#endif
> +
>   void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
>   {
>   	if (is_inode_flag_set(inode, FI_NEW_INODE))
> @@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
>   	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
>   		goto make_now;
>   
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +	if (ino == F2FS_COMPRESS_INO(sbi))
> +		goto make_now;
> +#endif
> +
>   	ret = do_read_inode(inode);
>   	if (ret)
>   		goto bad_inode;
> @@ -504,6 +513,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
>   	} else if (ino == F2FS_META_INO(sbi)) {
>   		inode->i_mapping->a_ops = &f2fs_meta_aops;
>   		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
> +	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> +		inode->i_mapping->a_ops = &f2fs_compress_aops;
> +#endif
> +		mapping_set_gfp_mask(inode->i_mapping,
> +			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
>   	} else if (S_ISREG(inode->i_mode)) {
>   		inode->i_op = &f2fs_file_inode_operations;
>   		inode->i_fop = &f2fs_file_operations;
> @@ -722,8 +737,12 @@ void f2fs_evict_inode(struct inode *inode)
>   	trace_f2fs_evict_inode(inode);
>   	truncate_inode_pages_final(&inode->i_data);
>   
> +	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
> +		f2fs_invalidate_compress_pages(sbi, inode->i_ino);
> +
>   	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
> -			inode->i_ino == F2FS_META_INO(sbi))
> +			inode->i_ino == F2FS_META_INO(sbi) ||
> +			inode->i_ino == F2FS_COMPRESS_INO(sbi))
>   		goto out_clear;
>   
>   	f2fs_bug_on(sbi, get_dirty_pages(inode));
> diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> index deca74cb17df..d8570b0359f5 100644
> --- a/fs/f2fs/segment.c
> +++ b/fs/f2fs/segment.c
> @@ -2305,6 +2305,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
>   		return;
>   
>   	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
> +	f2fs_invalidate_compress_page(sbi, addr);
>   
>   	/* add it into sit main buffer */
>   	down_write(&sit_i->sentry_lock);
> @@ -3432,9 +3433,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
>   reallocate:
>   	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
>   			&fio->new_blkaddr, sum, type, fio);
> -	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
> +	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
>   		invalidate_mapping_pages(META_MAPPING(fio->sbi),
>   					fio->old_blkaddr, fio->old_blkaddr);
> +		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
> +	}
>   
>   	/* writeout dirty page into bdev */
>   	f2fs_submit_page_write(fio);
> @@ -3607,6 +3610,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
>   	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
>   		invalidate_mapping_pages(META_MAPPING(sbi),
>   					old_blkaddr, old_blkaddr);
> +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
>   		if (!from_gc)
>   			update_segment_mtime(sbi, old_blkaddr, 0);
>   		update_sit_entry(sbi, old_blkaddr, -1);
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index 8a82721b69ef..50e749169841 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -148,6 +148,7 @@ enum {
>   	Opt_compress_extension,
>   	Opt_compress_chksum,
>   	Opt_compress_mode,
> +	Opt_compress_cache,
>   	Opt_atgc,
>   	Opt_err,
>   };
> @@ -218,6 +219,7 @@ static match_table_t f2fs_tokens = {
>   	{Opt_compress_extension, "compress_extension=%s"},
>   	{Opt_compress_chksum, "compress_chksum"},
>   	{Opt_compress_mode, "compress_mode=%s"},
> +	{Opt_compress_cache, "compress_cache"},
>   	{Opt_atgc, "atgc"},
>   	{Opt_err, NULL},
>   };
> @@ -955,12 +957,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
>   			}
>   			kfree(name);
>   			break;
> +		case Opt_compress_cache:
> +			set_opt(sbi, COMPRESS_CACHE);
> +			break;
>   #else
>   		case Opt_compress_algorithm:
>   		case Opt_compress_log_size:
>   		case Opt_compress_extension:
>   		case Opt_compress_chksum:
>   		case Opt_compress_mode:
> +		case Opt_compress_cache:
>   			f2fs_info(sbi, "compression options not supported");
>   			break;
>   #endif
> @@ -1285,6 +1291,8 @@ static void f2fs_put_super(struct super_block *sb)
>   
>   	f2fs_bug_on(sbi, sbi->fsync_node_num);
>   
> +	f2fs_destroy_compress_inode(sbi);
> +
>   	iput(sbi->node_inode);
>   	sbi->node_inode = NULL;
>   
> @@ -1554,6 +1562,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
>   		seq_printf(seq, ",compress_mode=%s", "fs");
>   	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
>   		seq_printf(seq, ",compress_mode=%s", "user");
> +
> +	if (test_opt(sbi, COMPRESS_CACHE))
> +		seq_puts(seq, ",compress_cache");
>   }
>   
>   static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
> @@ -3759,10 +3770,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
>   		goto free_node_inode;
>   	}
>   
> -	err = f2fs_register_sysfs(sbi);
> +	err = f2fs_init_compress_inode(sbi);
>   	if (err)
>   		goto free_root_inode;
>   
> +	err = f2fs_register_sysfs(sbi);
> +	if (err)
> +		goto free_compress_inode;
> +
>   #ifdef CONFIG_QUOTA
>   	/* Enable quota usage during mount */
>   	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
> @@ -3896,6 +3911,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
>   	/* evict some inodes being cached by GC */
>   	evict_inodes(sb);
>   	f2fs_unregister_sysfs(sbi);
> +free_compress_inode:
> +	f2fs_destroy_compress_inode(sbi);
>   free_root_inode:
>   	dput(sb->s_root);
>   	sb->s_root = NULL;
> diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
> index 7dc2a06cf19a..55be7afeee90 100644
> --- a/include/linux/f2fs_fs.h
> +++ b/include/linux/f2fs_fs.h
> @@ -34,6 +34,7 @@
>   #define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
>   #define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
>   #define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)
> +#define F2FS_COMPRESS_INO(sbi)	(NM_I(sbi)->max_nid)
>   
>   #define F2FS_MAX_QUOTAS		3
>   
> 

^ permalink raw reply related	[flat|nested] 11+ messages in thread
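
A quick worked example of the cached-page-count check in the quoted
f2fs_cache_compressed_page() (illustrative numbers, not taken from the
patch): with the default COMPRESS_PERCENT of 20 and free_ram of
1,000,000 pages, new compressed pages stop being admitted once the
compress mapping already holds free_ram / 100 * 20 = 200,000 pages,
i.e. roughly 20% of currently free memory.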

* Re: [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks
  2020-12-10  1:56 ` [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
@ 2020-12-10  2:15   ` Jaegeuk Kim
  2021-01-06 20:22     ` [f2fs-dev] " Jaegeuk Kim
  0 siblings, 1 reply; 11+ messages in thread
From: Jaegeuk Kim @ 2020-12-10  2:15 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, chao

On 12/10, Chao Yu wrote:
> Hi Daeho, Jaegeuk
> 
> I found one place in this patch that was missed when adapting to the
> "compress vs verity race" bugfix.
> 
> Could you please check and apply the diff below?

Applied.

> 
> From 61a9812944ac2f6f64fb458d5ef8b662c007bc50 Mon Sep 17 00:00:00 2001
> From: Chao Yu <yuchao0@huawei.com>
> Date: Thu, 10 Dec 2020 09:52:42 +0800
> Subject: [PATCH] fix
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/data.c | 7 ++-----
>  1 file changed, 2 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 6787a7a03e86..894c5680db4a 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2271,11 +2271,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  		f2fs_load_compressed_page(sbi, page, blkaddr);
>  		if (PageUptodate(page)) {
>  			if (!atomic_dec_return(&dic->pending_pages)) {
> -				bool verity =
> -					f2fs_need_verity(inode, start_idx);
> -
> -				f2fs_do_decompress_pages(dic, verity);
> -				if (verity) {
> +				f2fs_do_decompress_pages(dic, for_verity);
> +				if (for_verity) {
>  					f2fs_verify_pages(dic->rpages,
>  							dic->cluster_size);
>  					f2fs_free_dic(dic);
> -- 
> 2.29.2
> 
> Thanks,
> 
> On 2020/12/9 16:43, Chao Yu wrote:
> > Support to use address space of inner inode to cache compressed block,
> > in order to improve cache hit ratio of random read.
> > 
> > Signed-off-by: Chao Yu <yuchao0@huawei.com>
> > ---
> >   Documentation/filesystems/f2fs.rst |   3 +
> >   fs/f2fs/compress.c                 | 198 +++++++++++++++++++++++++++--
> >   fs/f2fs/data.c                     |  29 ++++-
> >   fs/f2fs/debug.c                    |  13 ++
> >   fs/f2fs/f2fs.h                     |  34 ++++-
> >   fs/f2fs/gc.c                       |   1 +
> >   fs/f2fs/inode.c                    |  21 ++-
> >   fs/f2fs/segment.c                  |   6 +-
> >   fs/f2fs/super.c                    |  19 ++-
> >   include/linux/f2fs_fs.h            |   1 +
> >   10 files changed, 305 insertions(+), 20 deletions(-)
> > 
> > diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
> > index dae15c96e659..5fa45fd8e4af 100644
> > --- a/Documentation/filesystems/f2fs.rst
> > +++ b/Documentation/filesystems/f2fs.rst
> > @@ -268,6 +268,9 @@ compress_mode=%s	 Control file compression mode. This supports "fs" and "user"
> >   			 choosing the target file and the timing. The user can do manual
> >   			 compression/decompression on the compression enabled files using
> >   			 ioctls.
> > +compress_cache		 Support to use address space of a filesystem managed inode to
> > +			 cache compressed block, in order to improve cache hit ratio of
> > +			 random read.
> >   inlinecrypt		 When possible, encrypt/decrypt the contents of encrypted
> >   			 files using the blk-crypto framework rather than
> >   			 filesystem-layer encryption. This allows the use of
> > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > index 4bcbacfe3325..446dd41a7bad 100644
> > --- a/fs/f2fs/compress.c
> > +++ b/fs/f2fs/compress.c
> > @@ -12,9 +12,11 @@
> >   #include <linux/lzo.h>
> >   #include <linux/lz4.h>
> >   #include <linux/zstd.h>
> > +#include <linux/pagevec.h>
> >   #include "f2fs.h"
> >   #include "node.h"
> > +#include "segment.h"
> >   #include <trace/events/f2fs.h>
> >   static struct kmem_cache *cic_entry_slab;
> > @@ -721,25 +723,14 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> >   	return ret;
> >   }
> > -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> > +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity)
> >   {
> > -	struct decompress_io_ctx *dic =
> > -			(struct decompress_io_ctx *)page_private(page);
> > -	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> >   	struct f2fs_inode_info *fi= F2FS_I(dic->inode);
> >   	const struct f2fs_compress_ops *cops =
> >   			f2fs_cops[fi->i_compress_algorithm];
> >   	int ret;
> >   	int i;
> > -	dec_page_count(sbi, F2FS_RD_DATA);
> > -
> > -	if (bio->bi_status || PageError(page))
> > -		dic->failed = true;
> > -
> > -	if (atomic_dec_return(&dic->pending_pages))
> > -		return;
> > -
> >   	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
> >   				dic->cluster_size, fi->i_compress_algorithm);
> > @@ -797,6 +788,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> >   	ret = cops->decompress_pages(dic);
> >   	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
> > +		struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> >   		u32 provided = le32_to_cpu(dic->cbuf->chksum);
> >   		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
> > @@ -830,6 +822,30 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> >   		f2fs_free_dic(dic);
> >   }
> > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > +						nid_t ino, block_t blkaddr);
> > +void f2fs_decompress_pages(struct bio *bio, struct page *page,
> > +						bool verity, unsigned int ofs)
> > +{
> > +	struct decompress_io_ctx *dic =
> > +			(struct decompress_io_ctx *)page_private(page);
> > +	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> > +	block_t blkaddr;
> > +
> > +	dec_page_count(sbi, F2FS_RD_DATA);
> > +
> > +	if (bio->bi_status || PageError(page))
> > +		dic->failed = true;
> > +
> > +	blkaddr = SECTOR_TO_BLOCK(bio->bi_iter.bi_sector) + ofs;
> > +	f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr);
> > +
> > +	if (atomic_dec_return(&dic->pending_pages))
> > +		return;
> > +
> > +	f2fs_do_decompress_pages(dic, verity);
> > +}
> > +
> >   static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
> >   {
> >   	if (cc->cluster_idx == NULL_CLUSTER)
> > @@ -1600,6 +1616,164 @@ void f2fs_decompress_end_io(struct page **rpages,
> >   	}
> >   }
> > +const struct address_space_operations f2fs_compress_aops = {
> > +	.releasepage = f2fs_release_page,
> > +	.invalidatepage = f2fs_invalidate_page,
> > +};
> > +
> > +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
> > +{
> > +	return sbi->compress_inode->i_mapping;
> > +}
> > +
> > +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
> > +{
> > +	if (!sbi->compress_inode)
> > +		return;
> > +	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
> > +}
> > +
> > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > +						nid_t ino, block_t blkaddr)
> > +{
> > +	struct page *cpage;
> > +	int ret;
> > +	struct sysinfo si;
> > +	unsigned long free_ram, avail_ram;
> > +
> > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > +		return;
> > +
> > +	si_meminfo(&si);
> > +	free_ram = si.freeram;
> > +	avail_ram = si.totalram - si.totalhigh;
> > +
> > +	/* free memory is lower than watermark, deny caching compress page */
> > +	if (free_ram <= sbi->compress_watermark / 100 * avail_ram)
> > +		return;
> > +
> > +	/* cached page count exceed threshold, deny caching compress page */
> > +	if (COMPRESS_MAPPING(sbi)->nrpages >=
> > +			free_ram / 100 * sbi->compress_percent)
> > +		return;
> > +
> > +	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
> > +	if (cpage) {
> > +		f2fs_put_page(cpage, 0);
> > +		return;
> > +	}
> > +
> > +	cpage = alloc_page(__GFP_IO);
> > +	if (!cpage)
> > +		return;
> > +
> > +	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
> > +						blkaddr, GFP_NOFS);
> > +	if (ret) {
> > +		f2fs_put_page(cpage, 0);
> > +		return;
> > +	}
> > +
> > +	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
> > +	SetPageUptodate(cpage);
> > +
> > +	f2fs_set_page_private(cpage, ino);
> > +
> > +	f2fs_put_page(cpage, 1);
> > +}
> > +
> > +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > +								block_t blkaddr)
> > +{
> > +	struct page *cpage;
> > +
> > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > +		return;
> > +
> > +	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
> > +				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
> > +	if (cpage) {
> > +		if (PageUptodate(cpage)) {
> > +			atomic_inc(&sbi->compress_page_hit);
> > +			memcpy(page_address(page),
> > +				page_address(cpage), PAGE_SIZE);
> > +			SetPageUptodate(page);
> > +		}
> > +		f2fs_put_page(cpage, 1);
> > +	}
> > +}
> > +
> > +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
> > +{
> > +	struct address_space *mapping = sbi->compress_inode->i_mapping;
> > +	struct pagevec pvec;
> > +	pgoff_t index = 0;
> > +	pgoff_t end = MAX_BLKADDR(sbi);
> > +
> > +	pagevec_init(&pvec);
> > +
> > +	do {
> > +		unsigned int nr_pages;
> > +		int i;
> > +
> > +		nr_pages = pagevec_lookup_range(&pvec, mapping,
> > +						&index, end - 1);
> > +		if (!nr_pages)
> > +			break;
> > +
> > +		for (i = 0; i < nr_pages; i++) {
> > +			struct page *page = pvec.pages[i];
> > +
> > +			if (page->index > end)
> > +				break;
> > +
> > +			lock_page(page);
> > +			if (page->mapping != mapping) {
> > +				unlock_page(page);
> > +				continue;
> > +			}
> > +
> > +			if (ino != page_private(page)) {
> > +				unlock_page(page);
> > +				continue;
> > +			}
> > +
> > +			generic_error_remove_page(mapping, page);
> > +			unlock_page(page);
> > +		}
> > +		pagevec_release(&pvec);
> > +		cond_resched();
> > +	} while (index < end);
> > +}
> > +
> > +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
> > +{
> > +	struct inode *inode;
> > +
> > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > +		return 0;
> > +
> > +	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
> > +	if (IS_ERR(inode))
> > +		return PTR_ERR(inode);
> > +	sbi->compress_inode = inode;
> > +
> > +	sbi->compress_percent = COMPRESS_PERCENT;
> > +	sbi->compress_watermark = COMPRESS_WATERMARK;
> > +
> > +	atomic_set(&sbi->compress_page_hit, 0);
> > +
> > +	return 0;
> > +}
> > +
> > +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
> > +{
> > +	if (!sbi->compress_inode)
> > +		return;
> > +	iput(sbi->compress_inode);
> > +	sbi->compress_inode = NULL;
> > +}
> > +
> >   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
> >   {
> >   	dev_t dev = sbi->sb->s_bdev->bd_dev;
> > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > index aa34d620bec9..6787a7a03e86 100644
> > --- a/fs/f2fs/data.c
> > +++ b/fs/f2fs/data.c
> > @@ -133,17 +133,21 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
> >   	struct page *page;
> >   	struct bio_vec *bv;
> >   	struct bvec_iter_all iter_all;
> > +	unsigned int ofs = 0;
> >   	bio_for_each_segment_all(bv, bio, iter_all) {
> >   		page = bv->bv_page;
> >   #ifdef CONFIG_F2FS_FS_COMPRESSION
> >   		if (compr && f2fs_is_compressed_page(page)) {
> > -			f2fs_decompress_pages(bio, page, verity);
> > +			f2fs_decompress_pages(bio, page, verity, ofs);
> > +			ofs++;
> >   			continue;
> >   		}
> > -		if (verity)
> > +		if (verity) {
> > +			ofs++;
> >   			continue;
> > +		}
> >   #endif
> >   		/* PG_error was set if any post_read step failed */
> > @@ -156,6 +160,7 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
> >   		}
> >   		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
> >   		unlock_page(page);
> > +		ofs++;
> >   	}
> >   }
> > @@ -1421,9 +1426,11 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
> >   	old_blkaddr = dn->data_blkaddr;
> >   	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
> >   				&sum, seg_type, NULL);
> > -	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
> > +	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
> >   		invalidate_mapping_pages(META_MAPPING(sbi),
> >   					old_blkaddr, old_blkaddr);
> > +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
> > +	}
> >   	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
> >   	/*
> > @@ -2261,6 +2268,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> >   		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> >   						dn.ofs_in_node + i + 1);
> > +		f2fs_load_compressed_page(sbi, page, blkaddr);
> > +		if (PageUptodate(page)) {
> > +			if (!atomic_dec_return(&dic->pending_pages)) {
> > +				bool verity =
> > +					f2fs_need_verity(inode, start_idx);
> > +
> > +				f2fs_do_decompress_pages(dic, verity);
> > +				if (verity) {
> > +					f2fs_verify_pages(dic->rpages,
> > +							dic->cluster_size);
> > +					f2fs_free_dic(dic);
> > +				}
> > +			}
> > +			continue;
> > +		}
> > +
> >   		if (bio && (!page_is_mergeable(sbi, bio,
> >   					*last_block_in_bio, blkaddr) ||
> >   		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
> > diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
> > index 197c914119da..f1f8714066c5 100644
> > --- a/fs/f2fs/debug.c
> > +++ b/fs/f2fs/debug.c
> > @@ -145,6 +145,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
> >   		si->node_pages = NODE_MAPPING(sbi)->nrpages;
> >   	if (sbi->meta_inode)
> >   		si->meta_pages = META_MAPPING(sbi)->nrpages;
> > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > +	if (sbi->compress_inode) {
> > +		si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
> > +		si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
> > +	}
> > +#endif
> >   	si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
> >   	si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
> >   	si->sits = MAIN_SEGS(sbi);
> > @@ -299,6 +305,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
> >   		unsigned npages = META_MAPPING(sbi)->nrpages;
> >   		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
> >   	}
> > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > +	if (sbi->compress_inode) {
> > +		unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
> > +		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
> > +	}
> > +#endif
> >   }
> >   static int stat_show(struct seq_file *s, void *v)
> > @@ -461,6 +473,7 @@ static int stat_show(struct seq_file *s, void *v)
> >   			"volatile IO: %4d (Max. %4d)\n",
> >   			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
> >   			   si->vw_cnt, si->max_vw_cnt);
> > +		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
> >   		seq_printf(s, "  - nodes: %4d in %4d\n",
> >   			   si->ndirty_node, si->node_pages);
> >   		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
> > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > index 7364d453783f..0ff8b18eda05 100644
> > --- a/fs/f2fs/f2fs.h
> > +++ b/fs/f2fs/f2fs.h
> > @@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
> >   #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
> >   #define F2FS_MOUNT_NORECOVERY		0x04000000
> >   #define F2FS_MOUNT_ATGC			0x08000000
> > +#define F2FS_MOUNT_COMPRESS_CACHE	0x10000000
> >   #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
> >   #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
> > @@ -1298,6 +1299,9 @@ enum compress_flag {
> >   	COMPRESS_MAX_FLAG,
> >   };
> > +#define	COMPRESS_WATERMARK			20
> > +#define	COMPRESS_PERCENT			20
> > +
> >   #define COMPRESS_DATA_RESERVED_SIZE		4
> >   struct compress_data {
> >   	__le32 clen;			/* compressed data size */
> > @@ -1571,6 +1575,11 @@ struct f2fs_sb_info {
> >   #ifdef CONFIG_F2FS_FS_COMPRESSION
> >   	struct kmem_cache *page_array_slab;	/* page array entry */
> >   	unsigned int page_array_slab_size;	/* default page array slab size */
> > +
> > +	struct inode *compress_inode;		/* cache compressed blocks */
> > +	unsigned int compress_percent;		/* cache page percentage */
> > +	unsigned int compress_watermark;	/* cache page watermark */
> > +	atomic_t compress_page_hit;		/* cache hit count */
> >   #endif
> >   };
> > @@ -3536,7 +3545,8 @@ struct f2fs_stat_info {
> >   	unsigned int bimodal, avg_vblocks;
> >   	int util_free, util_valid, util_invalid;
> >   	int rsvd_segs, overp_segs;
> > -	int dirty_count, node_pages, meta_pages;
> > +	int dirty_count, node_pages, meta_pages, compress_pages;
> > +	int compress_page_hit;
> >   	int prefree_count, call_count, cp_count, bg_cp_count;
> >   	int tot_segs, node_segs, data_segs, free_segs, free_secs;
> >   	int bg_node_segs, bg_data_segs;
> > @@ -3874,7 +3884,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
> >   bool f2fs_is_compress_backend_ready(struct inode *inode);
> >   int f2fs_init_compress_mempool(void);
> >   void f2fs_destroy_compress_mempool(void);
> > -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
> > +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity);
> > +void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity, unsigned int ofs);
> >   bool f2fs_cluster_is_empty(struct compress_ctx *cc);
> >   bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
> >   void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
> > @@ -3893,10 +3904,19 @@ void f2fs_decompress_end_io(struct page **rpages,
> >   int f2fs_init_compress_ctx(struct compress_ctx *cc);
> >   void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
> >   void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> > +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
> > +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
> >   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
> >   void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
> >   int __init f2fs_init_compress_cache(void);
> >   void f2fs_destroy_compress_cache(void);
> > +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
> > +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
> > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > +						nid_t ino, block_t blkaddr);
> > +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > +								block_t blkaddr);
> > +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
> >   #else
> >   static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
> >   static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
> > @@ -3913,10 +3933,20 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
> >   }
> >   static inline int f2fs_init_compress_mempool(void) { return 0; }
> >   static inline void f2fs_destroy_compress_mempool(void) { }
> > +static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
> > +static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
> >   static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
> >   static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
> >   static inline int __init f2fs_init_compress_cache(void) { return 0; }
> >   static inline void f2fs_destroy_compress_cache(void) { }
> > +static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
> > +				block_t blkaddr) { }
> > +static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
> > +				struct page *page, nid_t ino, block_t blkaddr) { }
> > +static inline void f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
> > +				struct page *page, block_t blkaddr) { }
> > +static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
> > +							nid_t ino) { }
> >   #endif
> >   static inline void set_compress_context(struct inode *inode)
> > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > index 3ef84e6ded41..43919a3ae6a6 100644
> > --- a/fs/f2fs/gc.c
> > +++ b/fs/f2fs/gc.c
> > @@ -1225,6 +1225,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
> >   	f2fs_put_page(mpage, 1);
> >   	invalidate_mapping_pages(META_MAPPING(fio.sbi),
> >   				fio.old_blkaddr, fio.old_blkaddr);
> > +	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
> >   	set_page_dirty(fio.encrypted_page);
> >   	if (clear_page_dirty_for_io(fio.encrypted_page))
> > diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> > index 349d9cb933ee..f030b9b79202 100644
> > --- a/fs/f2fs/inode.c
> > +++ b/fs/f2fs/inode.c
> > @@ -18,6 +18,10 @@
> >   #include <trace/events/f2fs.h>
> > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > +extern const struct address_space_operations f2fs_compress_aops;
> > +#endif
> > +
> >   void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
> >   {
> >   	if (is_inode_flag_set(inode, FI_NEW_INODE))
> > @@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
> >   	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
> >   		goto make_now;
> > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > +	if (ino == F2FS_COMPRESS_INO(sbi))
> > +		goto make_now;
> > +#endif
> > +
> >   	ret = do_read_inode(inode);
> >   	if (ret)
> >   		goto bad_inode;
> > @@ -504,6 +513,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
> >   	} else if (ino == F2FS_META_INO(sbi)) {
> >   		inode->i_mapping->a_ops = &f2fs_meta_aops;
> >   		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
> > +	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
> > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > +		inode->i_mapping->a_ops = &f2fs_compress_aops;
> > +#endif
> > +		mapping_set_gfp_mask(inode->i_mapping,
> > +			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
> >   	} else if (S_ISREG(inode->i_mode)) {
> >   		inode->i_op = &f2fs_file_inode_operations;
> >   		inode->i_fop = &f2fs_file_operations;
> > @@ -722,8 +737,12 @@ void f2fs_evict_inode(struct inode *inode)
> >   	trace_f2fs_evict_inode(inode);
> >   	truncate_inode_pages_final(&inode->i_data);
> > +	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
> > +		f2fs_invalidate_compress_pages(sbi, inode->i_ino);
> > +
> >   	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
> > -			inode->i_ino == F2FS_META_INO(sbi))
> > +			inode->i_ino == F2FS_META_INO(sbi) ||
> > +			inode->i_ino == F2FS_COMPRESS_INO(sbi))
> >   		goto out_clear;
> >   	f2fs_bug_on(sbi, get_dirty_pages(inode));
> > diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> > index deca74cb17df..d8570b0359f5 100644
> > --- a/fs/f2fs/segment.c
> > +++ b/fs/f2fs/segment.c
> > @@ -2305,6 +2305,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
> >   		return;
> >   	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
> > +	f2fs_invalidate_compress_page(sbi, addr);
> >   	/* add it into sit main buffer */
> >   	down_write(&sit_i->sentry_lock);
> > @@ -3432,9 +3433,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
> >   reallocate:
> >   	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
> >   			&fio->new_blkaddr, sum, type, fio);
> > -	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
> > +	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
> >   		invalidate_mapping_pages(META_MAPPING(fio->sbi),
> >   					fio->old_blkaddr, fio->old_blkaddr);
> > +		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
> > +	}
> >   	/* writeout dirty page into bdev */
> >   	f2fs_submit_page_write(fio);
> > @@ -3607,6 +3610,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
> >   	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
> >   		invalidate_mapping_pages(META_MAPPING(sbi),
> >   					old_blkaddr, old_blkaddr);
> > +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
> >   		if (!from_gc)
> >   			update_segment_mtime(sbi, old_blkaddr, 0);
> >   		update_sit_entry(sbi, old_blkaddr, -1);
> > diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> > index 8a82721b69ef..50e749169841 100644
> > --- a/fs/f2fs/super.c
> > +++ b/fs/f2fs/super.c
> > @@ -148,6 +148,7 @@ enum {
> >   	Opt_compress_extension,
> >   	Opt_compress_chksum,
> >   	Opt_compress_mode,
> > +	Opt_compress_cache,
> >   	Opt_atgc,
> >   	Opt_err,
> >   };
> > @@ -218,6 +219,7 @@ static match_table_t f2fs_tokens = {
> >   	{Opt_compress_extension, "compress_extension=%s"},
> >   	{Opt_compress_chksum, "compress_chksum"},
> >   	{Opt_compress_mode, "compress_mode=%s"},
> > +	{Opt_compress_cache, "compress_cache"},
> >   	{Opt_atgc, "atgc"},
> >   	{Opt_err, NULL},
> >   };
> > @@ -955,12 +957,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
> >   			}
> >   			kfree(name);
> >   			break;
> > +		case Opt_compress_cache:
> > +			set_opt(sbi, COMPRESS_CACHE);
> > +			break;
> >   #else
> >   		case Opt_compress_algorithm:
> >   		case Opt_compress_log_size:
> >   		case Opt_compress_extension:
> >   		case Opt_compress_chksum:
> >   		case Opt_compress_mode:
> > +		case Opt_compress_cache:
> >   			f2fs_info(sbi, "compression options not supported");
> >   			break;
> >   #endif
> > @@ -1285,6 +1291,8 @@ static void f2fs_put_super(struct super_block *sb)
> >   	f2fs_bug_on(sbi, sbi->fsync_node_num);
> > +	f2fs_destroy_compress_inode(sbi);
> > +
> >   	iput(sbi->node_inode);
> >   	sbi->node_inode = NULL;
> > @@ -1554,6 +1562,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
> >   		seq_printf(seq, ",compress_mode=%s", "fs");
> >   	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
> >   		seq_printf(seq, ",compress_mode=%s", "user");
> > +
> > +	if (test_opt(sbi, COMPRESS_CACHE))
> > +		seq_puts(seq, ",compress_cache");
> >   }
> >   static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
> > @@ -3759,10 +3770,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
> >   		goto free_node_inode;
> >   	}
> > -	err = f2fs_register_sysfs(sbi);
> > +	err = f2fs_init_compress_inode(sbi);
> >   	if (err)
> >   		goto free_root_inode;
> > +	err = f2fs_register_sysfs(sbi);
> > +	if (err)
> > +		goto free_compress_inode;
> > +
> >   #ifdef CONFIG_QUOTA
> >   	/* Enable quota usage during mount */
> >   	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
> > @@ -3896,6 +3911,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
> >   	/* evict some inodes being cached by GC */
> >   	evict_inodes(sb);
> >   	f2fs_unregister_sysfs(sbi);
> > +free_compress_inode:
> > +	f2fs_destroy_compress_inode(sbi);
> >   free_root_inode:
> >   	dput(sb->s_root);
> >   	sb->s_root = NULL;
> > diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
> > index 7dc2a06cf19a..55be7afeee90 100644
> > --- a/include/linux/f2fs_fs.h
> > +++ b/include/linux/f2fs_fs.h
> > @@ -34,6 +34,7 @@
> >   #define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
> >   #define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
> >   #define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)
> > +#define F2FS_COMPRESS_INO(sbi)	(NM_I(sbi)->max_nid)
> >   #define F2FS_MAX_QUOTAS		3
> > 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [f2fs-dev] [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks
  2020-12-10  2:15   ` Jaegeuk Kim
@ 2021-01-06 20:22     ` Jaegeuk Kim
  2021-01-07 11:48       ` Chao Yu
  0 siblings, 1 reply; 11+ messages in thread
From: Jaegeuk Kim @ 2021-01-06 20:22 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-kernel, linux-f2fs-devel

On 12/09, Jaegeuk Kim wrote:
> On 12/10, Chao Yu wrote:
> > Hi Daeho, Jaegeuk
> > 
> > I found one place in this patch that was missed when adapting to the
> > "compress vs verity race" bugfix.
> > 
> > Could you please check and apply the diff below?
> 
> Applied.

Hi Chao,

Could you please rebase this patch on top of Eric's cleanup?

Thanks,

> 
> > 
> > From 61a9812944ac2f6f64fb458d5ef8b662c007bc50 Mon Sep 17 00:00:00 2001
> > From: Chao Yu <yuchao0@huawei.com>
> > Date: Thu, 10 Dec 2020 09:52:42 +0800
> > Subject: [PATCH] fix
> > 
> > Signed-off-by: Chao Yu <yuchao0@huawei.com>
> > ---
> >  fs/f2fs/data.c | 7 ++-----
> >  1 file changed, 2 insertions(+), 5 deletions(-)
> > 
> > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > index 6787a7a03e86..894c5680db4a 100644
> > --- a/fs/f2fs/data.c
> > +++ b/fs/f2fs/data.c
> > @@ -2271,11 +2271,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> >  		f2fs_load_compressed_page(sbi, page, blkaddr);
> >  		if (PageUptodate(page)) {
> >  			if (!atomic_dec_return(&dic->pending_pages)) {
> > -				bool verity =
> > -					f2fs_need_verity(inode, start_idx);
> > -
> > -				f2fs_do_decompress_pages(dic, verity);
> > -				if (verity) {
> > +				f2fs_do_decompress_pages(dic, for_verity);
> > +				if (for_verity) {
> >  					f2fs_verify_pages(dic->rpages,
> >  							dic->cluster_size);
> >  					f2fs_free_dic(dic);
> > -- 
> > 2.29.2
> > 
> > Thanks,
> > 
> > On 2020/12/9 16:43, Chao Yu wrote:
> > > Support to use address space of inner inode to cache compressed block,
> > > in order to improve cache hit ratio of random read.
> > > 
> > > Signed-off-by: Chao Yu <yuchao0@huawei.com>
> > > ---
> > >   Documentation/filesystems/f2fs.rst |   3 +
> > >   fs/f2fs/compress.c                 | 198 +++++++++++++++++++++++++++--
> > >   fs/f2fs/data.c                     |  29 ++++-
> > >   fs/f2fs/debug.c                    |  13 ++
> > >   fs/f2fs/f2fs.h                     |  34 ++++-
> > >   fs/f2fs/gc.c                       |   1 +
> > >   fs/f2fs/inode.c                    |  21 ++-
> > >   fs/f2fs/segment.c                  |   6 +-
> > >   fs/f2fs/super.c                    |  19 ++-
> > >   include/linux/f2fs_fs.h            |   1 +
> > >   10 files changed, 305 insertions(+), 20 deletions(-)
> > > 
> > > diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
> > > index dae15c96e659..5fa45fd8e4af 100644
> > > --- a/Documentation/filesystems/f2fs.rst
> > > +++ b/Documentation/filesystems/f2fs.rst
> > > @@ -268,6 +268,9 @@ compress_mode=%s	 Control file compression mode. This supports "fs" and "user"
> > >   			 choosing the target file and the timing. The user can do manual
> > >   			 compression/decompression on the compression enabled files using
> > >   			 ioctls.
> > > +compress_cache		 Support to use address space of a filesystem managed inode to
> > > +			 cache compressed block, in order to improve cache hit ratio of
> > > +			 random read.
> > >   inlinecrypt		 When possible, encrypt/decrypt the contents of encrypted
> > >   			 files using the blk-crypto framework rather than
> > >   			 filesystem-layer encryption. This allows the use of
> > > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > > index 4bcbacfe3325..446dd41a7bad 100644
> > > --- a/fs/f2fs/compress.c
> > > +++ b/fs/f2fs/compress.c
> > > @@ -12,9 +12,11 @@
> > >   #include <linux/lzo.h>
> > >   #include <linux/lz4.h>
> > >   #include <linux/zstd.h>
> > > +#include <linux/pagevec.h>
> > >   #include "f2fs.h"
> > >   #include "node.h"
> > > +#include "segment.h"
> > >   #include <trace/events/f2fs.h>
> > >   static struct kmem_cache *cic_entry_slab;
> > > @@ -721,25 +723,14 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > >   	return ret;
> > >   }
> > > -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> > > +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity)
> > >   {
> > > -	struct decompress_io_ctx *dic =
> > > -			(struct decompress_io_ctx *)page_private(page);
> > > -	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> > >   	struct f2fs_inode_info *fi= F2FS_I(dic->inode);
> > >   	const struct f2fs_compress_ops *cops =
> > >   			f2fs_cops[fi->i_compress_algorithm];
> > >   	int ret;
> > >   	int i;
> > > -	dec_page_count(sbi, F2FS_RD_DATA);
> > > -
> > > -	if (bio->bi_status || PageError(page))
> > > -		dic->failed = true;
> > > -
> > > -	if (atomic_dec_return(&dic->pending_pages))
> > > -		return;
> > > -
> > >   	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
> > >   				dic->cluster_size, fi->i_compress_algorithm);
> > > @@ -797,6 +788,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> > >   	ret = cops->decompress_pages(dic);
> > >   	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
> > > +		struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> > >   		u32 provided = le32_to_cpu(dic->cbuf->chksum);
> > >   		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
> > > @@ -830,6 +822,30 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> > >   		f2fs_free_dic(dic);
> > >   }
> > > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > > +						nid_t ino, block_t blkaddr);
> > > +void f2fs_decompress_pages(struct bio *bio, struct page *page,
> > > +						bool verity, unsigned int ofs)
> > > +{
> > > +	struct decompress_io_ctx *dic =
> > > +			(struct decompress_io_ctx *)page_private(page);
> > > +	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> > > +	block_t blkaddr;
> > > +
> > > +	dec_page_count(sbi, F2FS_RD_DATA);
> > > +
> > > +	if (bio->bi_status || PageError(page))
> > > +		dic->failed = true;
> > > +
> > > +	blkaddr = SECTOR_TO_BLOCK(bio->bi_iter.bi_sector) + ofs;
> > > +	f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr);
> > > +
> > > +	if (atomic_dec_return(&dic->pending_pages))
> > > +		return;
> > > +
> > > +	f2fs_do_decompress_pages(dic, verity);
> > > +}
> > > +
> > >   static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
> > >   {
> > >   	if (cc->cluster_idx == NULL_CLUSTER)
> > > @@ -1600,6 +1616,164 @@ void f2fs_decompress_end_io(struct page **rpages,
> > >   	}
> > >   }
> > > +const struct address_space_operations f2fs_compress_aops = {
> > > +	.releasepage = f2fs_release_page,
> > > +	.invalidatepage = f2fs_invalidate_page,
> > > +};
> > > +
> > > +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
> > > +{
> > > +	return sbi->compress_inode->i_mapping;
> > > +}
> > > +
> > > +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
> > > +{
> > > +	if (!sbi->compress_inode)
> > > +		return;
> > > +	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
> > > +}
> > > +
> > > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > > +						nid_t ino, block_t blkaddr)
> > > +{
> > > +	struct page *cpage;
> > > +	int ret;
> > > +	struct sysinfo si;
> > > +	unsigned long free_ram, avail_ram;
> > > +
> > > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > > +		return;
> > > +
> > > +	si_meminfo(&si);
> > > +	free_ram = si.freeram;
> > > +	avail_ram = si.totalram - si.totalhigh;
> > > +
> > > +	/* free memory is lower than watermark, deny caching compress page */
> > > +	if (free_ram <= sbi->compress_watermark / 100 * avail_ram)
> > > +		return;
> > > +
> > > +	/* cached page count exceed threshold, deny caching compress page */
> > > +	if (COMPRESS_MAPPING(sbi)->nrpages >=
> > > +			free_ram / 100 * sbi->compress_percent)
> > > +		return;
> > > +
> > > +	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
> > > +	if (cpage) {
> > > +		f2fs_put_page(cpage, 0);
> > > +		return;
> > > +	}
> > > +
> > > +	cpage = alloc_page(__GFP_IO);
> > > +	if (!cpage)
> > > +		return;
> > > +
> > > +	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
> > > +						blkaddr, GFP_NOFS);
> > > +	if (ret) {
> > > +		f2fs_put_page(cpage, 0);
> > > +		return;
> > > +	}
> > > +
> > > +	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
> > > +	SetPageUptodate(cpage);
> > > +
> > > +	f2fs_set_page_private(cpage, ino);
> > > +
> > > +	f2fs_put_page(cpage, 1);
> > > +}
> > > +
> > > +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > > +								block_t blkaddr)
> > > +{
> > > +	struct page *cpage;
> > > +
> > > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > > +		return;
> > > +
> > > +	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
> > > +				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
> > > +	if (cpage) {
> > > +		if (PageUptodate(cpage)) {
> > > +			atomic_inc(&sbi->compress_page_hit);
> > > +			memcpy(page_address(page),
> > > +				page_address(cpage), PAGE_SIZE);
> > > +			SetPageUptodate(page);
> > > +		}
> > > +		f2fs_put_page(cpage, 1);
> > > +	}
> > > +}
> > > +
> > > +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
> > > +{
> > > +	struct address_space *mapping = sbi->compress_inode->i_mapping;
> > > +	struct pagevec pvec;
> > > +	pgoff_t index = 0;
> > > +	pgoff_t end = MAX_BLKADDR(sbi);
> > > +
> > > +	pagevec_init(&pvec);
> > > +
> > > +	do {
> > > +		unsigned int nr_pages;
> > > +		int i;
> > > +
> > > +		nr_pages = pagevec_lookup_range(&pvec, mapping,
> > > +						&index, end - 1);
> > > +		if (!nr_pages)
> > > +			break;
> > > +
> > > +		for (i = 0; i < nr_pages; i++) {
> > > +			struct page *page = pvec.pages[i];
> > > +
> > > +			if (page->index > end)
> > > +				break;
> > > +
> > > +			lock_page(page);
> > > +			if (page->mapping != mapping) {
> > > +				unlock_page(page);
> > > +				continue;
> > > +			}
> > > +
> > > +			if (ino != page_private(page)) {
> > > +				unlock_page(page);
> > > +				continue;
> > > +			}
> > > +
> > > +			generic_error_remove_page(mapping, page);
> > > +			unlock_page(page);
> > > +		}
> > > +		pagevec_release(&pvec);
> > > +		cond_resched();
> > > +	} while (index < end);
> > > +}
> > > +
> > > +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
> > > +{
> > > +	struct inode *inode;
> > > +
> > > +	if (!test_opt(sbi, COMPRESS_CACHE))
> > > +		return 0;
> > > +
> > > +	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
> > > +	if (IS_ERR(inode))
> > > +		return PTR_ERR(inode);
> > > +	sbi->compress_inode = inode;
> > > +
> > > +	sbi->compress_percent = COMPRESS_PERCENT;
> > > +	sbi->compress_watermark = COMPRESS_WATERMARK;
> > > +
> > > +	atomic_set(&sbi->compress_page_hit, 0);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
> > > +{
> > > +	if (!sbi->compress_inode)
> > > +		return;
> > > +	iput(sbi->compress_inode);
> > > +	sbi->compress_inode = NULL;
> > > +}
> > > +
> > >   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
> > >   {
> > >   	dev_t dev = sbi->sb->s_bdev->bd_dev;
> > > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > > index aa34d620bec9..6787a7a03e86 100644
> > > --- a/fs/f2fs/data.c
> > > +++ b/fs/f2fs/data.c
> > > @@ -133,17 +133,21 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
> > >   	struct page *page;
> > >   	struct bio_vec *bv;
> > >   	struct bvec_iter_all iter_all;
> > > +	unsigned int ofs = 0;
> > >   	bio_for_each_segment_all(bv, bio, iter_all) {
> > >   		page = bv->bv_page;
> > >   #ifdef CONFIG_F2FS_FS_COMPRESSION
> > >   		if (compr && f2fs_is_compressed_page(page)) {
> > > -			f2fs_decompress_pages(bio, page, verity);
> > > +			f2fs_decompress_pages(bio, page, verity, ofs);
> > > +			ofs++;
> > >   			continue;
> > >   		}
> > > -		if (verity)
> > > +		if (verity) {
> > > +			ofs++;
> > >   			continue;
> > > +		}
> > >   #endif
> > >   		/* PG_error was set if any post_read step failed */
> > > @@ -156,6 +160,7 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
> > >   		}
> > >   		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
> > >   		unlock_page(page);
> > > +		ofs++;
> > >   	}
> > >   }
> > > @@ -1421,9 +1426,11 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
> > >   	old_blkaddr = dn->data_blkaddr;
> > >   	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
> > >   				&sum, seg_type, NULL);
> > > -	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
> > > +	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
> > >   		invalidate_mapping_pages(META_MAPPING(sbi),
> > >   					old_blkaddr, old_blkaddr);
> > > +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
> > > +	}
> > >   	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
> > >   	/*
> > > @@ -2261,6 +2268,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> > >   		blkaddr = data_blkaddr(dn.inode, dn.node_page,
> > >   						dn.ofs_in_node + i + 1);
> > > +		f2fs_load_compressed_page(sbi, page, blkaddr);
> > > +		if (PageUptodate(page)) {
> > > +			if (!atomic_dec_return(&dic->pending_pages)) {
> > > +				bool verity =
> > > +					f2fs_need_verity(inode, start_idx);
> > > +
> > > +				f2fs_do_decompress_pages(dic, verity);
> > > +				if (verity) {
> > > +					f2fs_verify_pages(dic->rpages,
> > > +							dic->cluster_size);
> > > +					f2fs_free_dic(dic);
> > > +				}
> > > +			}
> > > +			continue;
> > > +		}
> > > +
> > >   		if (bio && (!page_is_mergeable(sbi, bio,
> > >   					*last_block_in_bio, blkaddr) ||
> > >   		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
> > > diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
> > > index 197c914119da..f1f8714066c5 100644
> > > --- a/fs/f2fs/debug.c
> > > +++ b/fs/f2fs/debug.c
> > > @@ -145,6 +145,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
> > >   		si->node_pages = NODE_MAPPING(sbi)->nrpages;
> > >   	if (sbi->meta_inode)
> > >   		si->meta_pages = META_MAPPING(sbi)->nrpages;
> > > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > > +	if (sbi->compress_inode) {
> > > +		si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
> > > +		si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
> > > +	}
> > > +#endif
> > >   	si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
> > >   	si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
> > >   	si->sits = MAIN_SEGS(sbi);
> > > @@ -299,6 +305,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
> > >   		unsigned npages = META_MAPPING(sbi)->nrpages;
> > >   		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
> > >   	}
> > > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > > +	if (sbi->compress_inode) {
> > > +		unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
> > > +		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
> > > +	}
> > > +#endif
> > >   }
> > >   static int stat_show(struct seq_file *s, void *v)
> > > @@ -461,6 +473,7 @@ static int stat_show(struct seq_file *s, void *v)
> > >   			"volatile IO: %4d (Max. %4d)\n",
> > >   			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
> > >   			   si->vw_cnt, si->max_vw_cnt);
> > > +		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
> > >   		seq_printf(s, "  - nodes: %4d in %4d\n",
> > >   			   si->ndirty_node, si->node_pages);
> > >   		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
> > > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > > index 7364d453783f..0ff8b18eda05 100644
> > > --- a/fs/f2fs/f2fs.h
> > > +++ b/fs/f2fs/f2fs.h
> > > @@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
> > >   #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
> > >   #define F2FS_MOUNT_NORECOVERY		0x04000000
> > >   #define F2FS_MOUNT_ATGC			0x08000000
> > > +#define F2FS_MOUNT_COMPRESS_CACHE	0x10000000
> > >   #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
> > >   #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
> > > @@ -1298,6 +1299,9 @@ enum compress_flag {
> > >   	COMPRESS_MAX_FLAG,
> > >   };
> > > +#define	COMPRESS_WATERMARK			20
> > > +#define	COMPRESS_PERCENT			20
> > > +
> > >   #define COMPRESS_DATA_RESERVED_SIZE		4
> > >   struct compress_data {
> > >   	__le32 clen;			/* compressed data size */
> > > @@ -1571,6 +1575,11 @@ struct f2fs_sb_info {
> > >   #ifdef CONFIG_F2FS_FS_COMPRESSION
> > >   	struct kmem_cache *page_array_slab;	/* page array entry */
> > >   	unsigned int page_array_slab_size;	/* default page array slab size */
> > > +
> > > +	struct inode *compress_inode;		/* cache compressed blocks */
> > > +	unsigned int compress_percent;		/* cache page percentage */
> > > +	unsigned int compress_watermark;	/* cache page watermark */
> > > +	atomic_t compress_page_hit;		/* cache hit count */
> > >   #endif
> > >   };
> > > @@ -3536,7 +3545,8 @@ struct f2fs_stat_info {
> > >   	unsigned int bimodal, avg_vblocks;
> > >   	int util_free, util_valid, util_invalid;
> > >   	int rsvd_segs, overp_segs;
> > > -	int dirty_count, node_pages, meta_pages;
> > > +	int dirty_count, node_pages, meta_pages, compress_pages;
> > > +	int compress_page_hit;
> > >   	int prefree_count, call_count, cp_count, bg_cp_count;
> > >   	int tot_segs, node_segs, data_segs, free_segs, free_secs;
> > >   	int bg_node_segs, bg_data_segs;
> > > @@ -3874,7 +3884,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
> > >   bool f2fs_is_compress_backend_ready(struct inode *inode);
> > >   int f2fs_init_compress_mempool(void);
> > >   void f2fs_destroy_compress_mempool(void);
> > > -void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
> > > +void f2fs_do_decompress_pages(struct decompress_io_ctx *dic, bool verity);
> > > +void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity, unsigned int ofs);
> > >   bool f2fs_cluster_is_empty(struct compress_ctx *cc);
> > >   bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
> > >   void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
> > > @@ -3893,10 +3904,19 @@ void f2fs_decompress_end_io(struct page **rpages,
> > >   int f2fs_init_compress_ctx(struct compress_ctx *cc);
> > >   void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
> > >   void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
> > > +int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
> > > +void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
> > >   int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
> > >   void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
> > >   int __init f2fs_init_compress_cache(void);
> > >   void f2fs_destroy_compress_cache(void);
> > > +struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
> > > +void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
> > > +void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > > +						nid_t ino, block_t blkaddr);
> > > +void f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
> > > +								block_t blkaddr);
> > > +void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
> > >   #else
> > >   static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
> > >   static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
> > > @@ -3913,10 +3933,20 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
> > >   }
> > >   static inline int f2fs_init_compress_mempool(void) { return 0; }
> > >   static inline void f2fs_destroy_compress_mempool(void) { }
> > > +static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
> > > +static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
> > >   static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
> > >   static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
> > >   static inline int __init f2fs_init_compress_cache(void) { return 0; }
> > >   static inline void f2fs_destroy_compress_cache(void) { }
> > > +static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
> > > +				block_t blkaddr) { }
> > > +static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
> > > +				struct page *page, nid_t ino, block_t blkaddr) { }
> > > +static inline void f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
> > > +				struct page *page, block_t blkaddr) { }
> > > +static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
> > > +							nid_t ino) { }
> > >   #endif
> > >   static inline void set_compress_context(struct inode *inode)
> > > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > > index 3ef84e6ded41..43919a3ae6a6 100644
> > > --- a/fs/f2fs/gc.c
> > > +++ b/fs/f2fs/gc.c
> > > @@ -1225,6 +1225,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
> > >   	f2fs_put_page(mpage, 1);
> > >   	invalidate_mapping_pages(META_MAPPING(fio.sbi),
> > >   				fio.old_blkaddr, fio.old_blkaddr);
> > > +	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
> > >   	set_page_dirty(fio.encrypted_page);
> > >   	if (clear_page_dirty_for_io(fio.encrypted_page))
> > > diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> > > index 349d9cb933ee..f030b9b79202 100644
> > > --- a/fs/f2fs/inode.c
> > > +++ b/fs/f2fs/inode.c
> > > @@ -18,6 +18,10 @@
> > >   #include <trace/events/f2fs.h>
> > > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > > +extern const struct address_space_operations f2fs_compress_aops;
> > > +#endif
> > > +
> > >   void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
> > >   {
> > >   	if (is_inode_flag_set(inode, FI_NEW_INODE))
> > > @@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
> > >   	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
> > >   		goto make_now;
> > > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > > +	if (ino == F2FS_COMPRESS_INO(sbi))
> > > +		goto make_now;
> > > +#endif
> > > +
> > >   	ret = do_read_inode(inode);
> > >   	if (ret)
> > >   		goto bad_inode;
> > > @@ -504,6 +513,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
> > >   	} else if (ino == F2FS_META_INO(sbi)) {
> > >   		inode->i_mapping->a_ops = &f2fs_meta_aops;
> > >   		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
> > > +	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
> > > +#ifdef CONFIG_F2FS_FS_COMPRESSION
> > > +		inode->i_mapping->a_ops = &f2fs_compress_aops;
> > > +#endif
> > > +		mapping_set_gfp_mask(inode->i_mapping,
> > > +			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
> > >   	} else if (S_ISREG(inode->i_mode)) {
> > >   		inode->i_op = &f2fs_file_inode_operations;
> > >   		inode->i_fop = &f2fs_file_operations;
> > > @@ -722,8 +737,12 @@ void f2fs_evict_inode(struct inode *inode)
> > >   	trace_f2fs_evict_inode(inode);
> > >   	truncate_inode_pages_final(&inode->i_data);
> > > +	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
> > > +		f2fs_invalidate_compress_pages(sbi, inode->i_ino);
> > > +
> > >   	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
> > > -			inode->i_ino == F2FS_META_INO(sbi))
> > > +			inode->i_ino == F2FS_META_INO(sbi) ||
> > > +			inode->i_ino == F2FS_COMPRESS_INO(sbi))
> > >   		goto out_clear;
> > >   	f2fs_bug_on(sbi, get_dirty_pages(inode));
> > > diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> > > index deca74cb17df..d8570b0359f5 100644
> > > --- a/fs/f2fs/segment.c
> > > +++ b/fs/f2fs/segment.c
> > > @@ -2305,6 +2305,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
> > >   		return;
> > >   	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
> > > +	f2fs_invalidate_compress_page(sbi, addr);
> > >   	/* add it into sit main buffer */
> > >   	down_write(&sit_i->sentry_lock);
> > > @@ -3432,9 +3433,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
> > >   reallocate:
> > >   	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
> > >   			&fio->new_blkaddr, sum, type, fio);
> > > -	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
> > > +	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
> > >   		invalidate_mapping_pages(META_MAPPING(fio->sbi),
> > >   					fio->old_blkaddr, fio->old_blkaddr);
> > > +		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
> > > +	}
> > >   	/* writeout dirty page into bdev */
> > >   	f2fs_submit_page_write(fio);
> > > @@ -3607,6 +3610,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
> > >   	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
> > >   		invalidate_mapping_pages(META_MAPPING(sbi),
> > >   					old_blkaddr, old_blkaddr);
> > > +		f2fs_invalidate_compress_page(sbi, old_blkaddr);
> > >   		if (!from_gc)
> > >   			update_segment_mtime(sbi, old_blkaddr, 0);
> > >   		update_sit_entry(sbi, old_blkaddr, -1);
> > > diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> > > index 8a82721b69ef..50e749169841 100644
> > > --- a/fs/f2fs/super.c
> > > +++ b/fs/f2fs/super.c
> > > @@ -148,6 +148,7 @@ enum {
> > >   	Opt_compress_extension,
> > >   	Opt_compress_chksum,
> > >   	Opt_compress_mode,
> > > +	Opt_compress_cache,
> > >   	Opt_atgc,
> > >   	Opt_err,
> > >   };
> > > @@ -218,6 +219,7 @@ static match_table_t f2fs_tokens = {
> > >   	{Opt_compress_extension, "compress_extension=%s"},
> > >   	{Opt_compress_chksum, "compress_chksum"},
> > >   	{Opt_compress_mode, "compress_mode=%s"},
> > > +	{Opt_compress_cache, "compress_cache"},
> > >   	{Opt_atgc, "atgc"},
> > >   	{Opt_err, NULL},
> > >   };
> > > @@ -955,12 +957,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
> > >   			}
> > >   			kfree(name);
> > >   			break;
> > > +		case Opt_compress_cache:
> > > +			set_opt(sbi, COMPRESS_CACHE);
> > > +			break;
> > >   #else
> > >   		case Opt_compress_algorithm:
> > >   		case Opt_compress_log_size:
> > >   		case Opt_compress_extension:
> > >   		case Opt_compress_chksum:
> > >   		case Opt_compress_mode:
> > > +		case Opt_compress_cache:
> > >   			f2fs_info(sbi, "compression options not supported");
> > >   			break;
> > >   #endif
> > > @@ -1285,6 +1291,8 @@ static void f2fs_put_super(struct super_block *sb)
> > >   	f2fs_bug_on(sbi, sbi->fsync_node_num);
> > > +	f2fs_destroy_compress_inode(sbi);
> > > +
> > >   	iput(sbi->node_inode);
> > >   	sbi->node_inode = NULL;
> > > @@ -1554,6 +1562,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
> > >   		seq_printf(seq, ",compress_mode=%s", "fs");
> > >   	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
> > >   		seq_printf(seq, ",compress_mode=%s", "user");
> > > +
> > > +	if (test_opt(sbi, COMPRESS_CACHE))
> > > +		seq_puts(seq, ",compress_cache");
> > >   }
> > >   static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
> > > @@ -3759,10 +3770,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
> > >   		goto free_node_inode;
> > >   	}
> > > -	err = f2fs_register_sysfs(sbi);
> > > +	err = f2fs_init_compress_inode(sbi);
> > >   	if (err)
> > >   		goto free_root_inode;
> > > +	err = f2fs_register_sysfs(sbi);
> > > +	if (err)
> > > +		goto free_compress_inode;
> > > +
> > >   #ifdef CONFIG_QUOTA
> > >   	/* Enable quota usage during mount */
> > >   	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
> > > @@ -3896,6 +3911,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
> > >   	/* evict some inodes being cached by GC */
> > >   	evict_inodes(sb);
> > >   	f2fs_unregister_sysfs(sbi);
> > > +free_compress_inode:
> > > +	f2fs_destroy_compress_inode(sbi);
> > >   free_root_inode:
> > >   	dput(sb->s_root);
> > >   	sb->s_root = NULL;
> > > diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
> > > index 7dc2a06cf19a..55be7afeee90 100644
> > > --- a/include/linux/f2fs_fs.h
> > > +++ b/include/linux/f2fs_fs.h
> > > @@ -34,6 +34,7 @@
> > >   #define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
> > >   #define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
> > >   #define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)
> > > +#define F2FS_COMPRESS_INO(sbi)	(NM_I(sbi)->max_nid)
> > >   #define F2FS_MAX_QUOTAS		3
> > > 
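
A note on the admission heuristic in f2fs_cache_compressed_page() above: with
the defaults COMPRESS_WATERMARK = COMPRESS_PERCENT = 20, the intent appears to
be to stop caching once free memory falls to 20% of low memory, and to cap the
cache itself at 20% of the currently free pages.  As written,
"sbi->compress_watermark / 100" truncates to zero in integer arithmetic for a
watermark of 20, so the standalone sketch below multiplies before dividing to
match the stated intent.  It is a userspace illustration with invented sample
numbers, not f2fs code; all units are pages, as with si_meminfo() and
mapping->nrpages in the patch.

#include <stdbool.h>
#include <stdio.h>

#define COMPRESS_WATERMARK	20	/* percent of low memory */
#define COMPRESS_PERCENT	20	/* percent of free memory */

/* Mirror of the two "deny caching" checks, with multiply-before-divide. */
static bool may_cache_cpage(unsigned long free_ram, unsigned long avail_ram,
			    unsigned long cached_pages)
{
	/* free memory is lower than watermark: deny */
	if (free_ram <= avail_ram * COMPRESS_WATERMARK / 100)
		return false;

	/* cached page count already exceeds its share of free memory: deny */
	if (cached_pages >= free_ram * COMPRESS_PERCENT / 100)
		return false;

	return true;
}

int main(void)
{
	/* hypothetical: 4 GiB low memory, 1 GiB free, 40 MiB cached (4 KiB pages) */
	unsigned long avail_ram = 1048576, free_ram = 262144, cached = 10240;

	printf("cache another compressed block? %s\n",
	       may_cache_cpage(free_ram, avail_ram, cached) ? "yes" : "no");
	return 0;
}

With these numbers both checks pass; shrink free_ram to 209715 pages (20% of
avail_ram) or below and the first check starts denying new cache insertions.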
> 
> 
> _______________________________________________
> Linux-f2fs-devel mailing list
> Linux-f2fs-devel@lists.sourceforge.net
> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
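
One more detail from the read-completion path quoted above: f2fs_decompress_pages()
derives the physical block address of each compressed page from the bio's starting
sector plus the page's position within the bio (the new "ofs" counter threaded
through __read_end_io()).  Assuming 4 KiB blocks and 512-byte sectors, which is
what f2fs's SECTOR_TO_BLOCK() amounts to for the default block size (a right
shift by 3), the mapping works out as in this small standalone sketch with a
hypothetical starting sector:

#include <stdio.h>

#define LOG_SECTORS_PER_BLOCK	3	/* 4096-byte block / 512-byte sector */
#define SECTOR_TO_BLOCK(sectors)	((sectors) >> LOG_SECTORS_PER_BLOCK)

int main(void)
{
	unsigned long long bi_sector = 81920;	/* hypothetical bio start sector */
	unsigned int ofs;

	/* ofs counts the bio's pages in submission order */
	for (ofs = 0; ofs < 4; ofs++)
		printf("compressed page %u -> blkaddr %llu\n",
		       ofs, SECTOR_TO_BLOCK(bi_sector) + ofs);
	return 0;
}

That blkaddr is the index under which f2fs_cache_compressed_page() inserts the
copied page into the compress inode's address space, and the same key that
f2fs_load_compressed_page() and f2fs_invalidate_compress_page() look up later.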

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [f2fs-dev] [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks
  2021-01-06 20:22     ` [f2fs-dev] " Jaegeuk Kim
@ 2021-01-07 11:48       ` Chao Yu
  0 siblings, 0 replies; 11+ messages in thread
From: Chao Yu @ 2021-01-07 11:48 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-kernel, linux-f2fs-devel

On 2021/1/7 4:22, Jaegeuk Kim wrote:
> On 12/09, Jaegeuk Kim wrote:
>> On 12/10, Chao Yu wrote:
>>> Hi Daeho, Jaegeuk
>>>
>>> I found one place in this patch that still needs to be adapted to the
>>> "compress vs verity race" bugfix.
>>>
>>> Could you please check and apply the diff below?
>>
>> Applied.
> 
> Hi Chao,
> 
> Could you please rebase this patch on top of Eric's cleanup?

Done, :)

Thanks,

> 
> Thanks,
> 
>>
>>>
>>>  From 61a9812944ac2f6f64fb458d5ef8b662c007bc50 Mon Sep 17 00:00:00 2001
>>> From: Chao Yu <yuchao0@huawei.com>
>>> Date: Thu, 10 Dec 2020 09:52:42 +0800
>>> Subject: [PATCH] fix
>>>
>>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>>> ---
>>>   fs/f2fs/data.c | 7 ++-----
>>>   1 file changed, 2 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>> index 6787a7a03e86..894c5680db4a 100644
>>> --- a/fs/f2fs/data.c
>>> +++ b/fs/f2fs/data.c
>>> @@ -2271,11 +2271,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>   		f2fs_load_compressed_page(sbi, page, blkaddr);
>>>   		if (PageUptodate(page)) {
>>>   			if (!atomic_dec_return(&dic->pending_pages)) {
>>> -				bool verity =
>>> -					f2fs_need_verity(inode, start_idx);
>>> -
>>> -				f2fs_do_decompress_pages(dic, verity);
>>> -				if (verity) {
>>> +				f2fs_do_decompress_pages(dic, for_verity);
>>> +				if (for_verity) {
>>>   					f2fs_verify_pages(dic->rpages,
>>>   							dic->cluster_size);
>>>   					f2fs_free_dic(dic);
>>> -- 
>>> 2.29.2
>>>
>>> Thanks,
>>>
>>
>>
>> _______________________________________________
>> Linux-f2fs-devel mailing list
>> Linux-f2fs-devel@lists.sourceforge.net
>> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
> .
> 
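
For readers following the fix above: the change makes the cached-cluster fast
path in f2fs_read_multi_pages() reuse the for_verity flag the function already
computed for the whole cluster, instead of re-deriving it per cache hit with
f2fs_need_verity().  A simplified standalone model of the resulting flow is
below; every identifier in it is a stand-in for illustration, not a real f2fs
symbol.

#include <stdbool.h>
#include <stdio.h>

struct dic { int pending_pages; };		/* decompress-context stand-in */

static bool need_verity_once(void) { return true; }	/* decided once per cluster */
static void do_decompress(struct dic *d, bool verity)
{
	(void)d;
	printf("decompress cluster (verity=%d)\n", verity);
}
static void verify_and_free(struct dic *d)
{
	(void)d;
	printf("verify pages, free context\n");
}

static void read_cluster_from_cache(struct dic *d, int nr_blocks)
{
	bool for_verity = need_verity_once();	/* hoisted out of the loop */
	int i;

	for (i = 0; i < nr_blocks; i++) {
		/* each cache hit retires one pending page */
		if (--d->pending_pages == 0) {
			do_decompress(d, for_verity);
			if (for_verity)
				verify_and_free(d);
		}
	}
}

int main(void)
{
	struct dic d = { .pending_pages = 4 };

	read_cluster_from_cache(&d, 4);
	return 0;
}

The point is simply that the decompress/verify/free decision is taken from one
flag agreed on up front, presumably so this fast path and the bio completion
path treat verity-enabled clusters the same way.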

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2021-01-07 11:49 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-09  8:43 [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
2020-12-09  8:43 ` [PATCH RESEND v2 2/5] f2fs: compress: support compress level Chao Yu
2020-12-09  8:43 ` [PATCH RESEND v2 3/5] f2fs: compress: deny setting unsupported compress algorithm Chao Yu
2020-12-09  8:43 ` [PATCH RESEND v2 4/5] f2fs: introduce a new per-sb directory in sysfs Chao Yu
2020-12-09  8:43 ` [PATCH RESEND v2 5/5] f2fs: introduce sb_status sysfs node Chao Yu
2020-12-09 16:04   ` Jaegeuk Kim
2020-12-10  1:40     ` Chao Yu
2020-12-10  1:56 ` [PATCH RESEND v2 1/5] f2fs: compress: add compress_inode to cache compressed blocks Chao Yu
2020-12-10  2:15   ` Jaegeuk Kim
2021-01-06 20:22     ` [f2fs-dev] " Jaegeuk Kim
2021-01-07 11:48       ` Chao Yu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).