* [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-03  6:17 UTC
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Expand the 'compress_algorithm' mount option to accept a parameter in the
format <algorithm>:<level>. This gives users a way to set a specific
compression level for the lz4 and zstd algorithms, so that f2fs compression
can achieve a higher compression ratio.

In order to set a compression level for the lz4 algorithm, the
CONFIG_LZ4HC_COMPRESS and CONFIG_F2FS_FS_LZ4HC options need to be enabled
so that the lz4hc compression algorithm is available.
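
For example (the device path and levels below are only illustrative):

  mount -o compress_algorithm=lz4:9 /dev/sdc /f2fs
  mount -o compress_algorithm=zstd:15 /dev/sdc /f2fs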

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
v6:
- fix warning reported by checkpatch.pl
 Documentation/filesystems/f2fs.rst |  5 +++
 fs/f2fs/Kconfig                    |  9 ++++
 fs/f2fs/compress.c                 | 40 +++++++++++++++--
 fs/f2fs/f2fs.h                     |  9 ++++
 fs/f2fs/super.c                    | 72 +++++++++++++++++++++++++++++-
 include/linux/f2fs_fs.h            |  3 ++
 6 files changed, 133 insertions(+), 5 deletions(-)

diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index fd413d319e93..73ab31bbf694 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -249,6 +249,11 @@ checkpoint=%s[:%u[%]]	 Set to "disable" to turn off checkpointing. Set to "enabl
 			 This space is reclaimed once checkpoint=enable.
 compress_algorithm=%s	 Control compress algorithm, currently f2fs supports "lzo",
 			 "lz4", "zstd" and "lzo-rle" algorithm.
+compress_algorithm=%s:%d Control compress algorithm and its compress level, now, only
+			 "lz4" and "zstd" support compress level config.
+			 algorithm	level range
+			 lz4		3 - 16
+			 zstd		1 - 22
 compress_log_size=%u	 Support configuring compress cluster size, the size will
 			 be 4KB * (1 << %u), 16KB is minimum size, also it's
 			 default size.
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index d13c5c6a9787..8134b145ae4f 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -119,6 +119,15 @@ config F2FS_FS_LZ4
 	help
 	  Support LZ4 compress algorithm, if unsure, say Y.
 
+config F2FS_FS_LZ4HC
+	bool "LZ4HC compression support"
+	depends on F2FS_FS_COMPRESSION
+	depends on F2FS_FS_LZ4
+	select LZ4HC_COMPRESS
+	default y
+	help
+	  Support LZ4HC compress algorithm, if unsure, say Y.
+
 config F2FS_FS_ZSTD
 	bool "ZSTD compression support"
 	depends on F2FS_FS_COMPRESSION
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index db82da311fe4..dfadbc78946c 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -254,8 +254,13 @@ static const struct f2fs_compress_ops f2fs_lzo_ops = {
 #ifdef CONFIG_F2FS_FS_LZ4
 static int lz4_init_compress_ctx(struct compress_ctx *cc)
 {
-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
-				LZ4_MEM_COMPRESS, GFP_NOFS);
+	unsigned int size;
+	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+						COMPRESS_LEVEL_OFFSET;
+
+	size = level ? LZ4HC_MEM_COMPRESS : LZ4_MEM_COMPRESS;
+
+	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
 	if (!cc->private)
 		return -ENOMEM;
 
@@ -274,10 +279,34 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 	cc->private = NULL;
 }
 
+#ifdef CONFIG_F2FS_FS_LZ4HC
+static int lz4hc_compress_pages(struct compress_ctx *cc)
+{
+	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+						COMPRESS_LEVEL_OFFSET;
+	int len;
+
+	if (level)
+		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+					cc->clen, level, cc->private);
+	else
+		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+						cc->clen, cc->private);
+	if (!len)
+		return -EAGAIN;
+
+	cc->clen = len;
+	return 0;
+}
+#endif
+
 static int lz4_compress_pages(struct compress_ctx *cc)
 {
 	int len;
 
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	return lz4hc_compress_pages(cc);
+#endif
 	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 						cc->clen, cc->private);
 	if (!len)
@@ -327,8 +356,13 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
 	ZSTD_CStream *stream;
 	void *workspace;
 	unsigned int workspace_size;
+	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+						COMPRESS_LEVEL_OFFSET;
+
+	if (!level)
+		level = F2FS_ZSTD_DEFAULT_CLEVEL;
 
-	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
+	params = ZSTD_getParams(level, cc->rlen, 0);
 	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
 
 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 377a2e2bf466..76edec7483f3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -147,6 +147,7 @@ struct f2fs_mount_info {
 	/* For compression */
 	unsigned char compress_algorithm;	/* algorithm type */
 	unsigned char compress_log_size;	/* cluster log size */
+	unsigned char compress_level;		/* compress level */
 	bool compress_chksum;			/* compressed data chksum */
 	unsigned char compress_ext_cnt;		/* extension count */
 	int compress_mode;			/* compression mode */
@@ -736,6 +737,7 @@ struct f2fs_inode_info {
 	atomic_t i_compr_blocks;		/* # of compressed blocks */
 	unsigned char i_compress_algorithm;	/* algorithm type */
 	unsigned char i_log_cluster_size;	/* log of cluster size */
+	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
 	unsigned short i_compress_flag;		/* compress flag */
 	unsigned int i_cluster_size;		/* cluster size */
 };
@@ -1308,6 +1310,8 @@ struct compress_data {
 
 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
 
+#define	COMPRESS_LEVEL_OFFSET	8
+
 /* compress context */
 struct compress_ctx {
 	struct inode *inode;		/* inode the context belong to */
@@ -3959,6 +3963,11 @@ static inline void set_compress_context(struct inode *inode)
 				1 << COMPRESS_CHKSUM : 0;
 	F2FS_I(inode)->i_cluster_size =
 			1 << F2FS_I(inode)->i_log_cluster_size;
+	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
+			F2FS_OPTION(sbi).compress_level)
+		F2FS_I(inode)->i_compress_flag |=
+				F2FS_OPTION(sbi).compress_level <<
+				COMPRESS_LEVEL_OFFSET;
 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
 	set_inode_flag(inode, FI_COMPRESSED_FILE);
 	stat_inc_compr_inode(inode);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8442333ca0e2..44ba870bb352 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -25,6 +25,8 @@
 #include <linux/quota.h>
 #include <linux/unicode.h>
 #include <linux/part_stat.h>
+#include <linux/zstd.h>
+#include <linux/lz4.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -466,6 +468,57 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
 	return 0;
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+static int f2fs_compress_set_level(struct f2fs_sb_info *sbi, const char *str,
+						int type)
+{
+	unsigned int level;
+	int len;
+
+	if (type == COMPRESS_LZ4)
+		len = 3;
+	else if (type == COMPRESS_ZSTD)
+		len = 4;
+	else
+		return 0;
+
+	if (strlen(str) == len)
+		return 0;
+
+	str += len;
+
+	if (str[0] != ':') {
+		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+		return -EINVAL;
+	}
+	if (kstrtouint(str + 1, 10, &level))
+		return -EINVAL;
+	if (type == COMPRESS_LZ4) {
+#ifdef CONFIG_F2FS_FS_LZ4HC
+		if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
+			f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+			return -EINVAL;
+		}
+#else
+		f2fs_info(sbi, "doesn't support lz4hc compression");
+		return 0;
+#endif
+	} else if (type == COMPRESS_ZSTD) {
+#ifdef CONFIG_F2FS_FS_ZSTD
+		if (!level || level > ZSTD_maxCLevel()) {
+			f2fs_info(sbi, "invalid zstd compress level: %d", level);
+			return -EINVAL;
+		}
+#else
+		f2fs_info(sbi, "doesn't support zstd compression");
+		return 0;
+#endif
+	}
+	F2FS_OPTION(sbi).compress_level = level;
+	return 0;
+}
+#endif
+
 static int parse_options(struct super_block *sb, char *options, bool is_remount)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -886,10 +939,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			if (!strcmp(name, "lzo")) {
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZO;
-			} else if (!strcmp(name, "lz4")) {
+			} else if (!strncmp(name, "lz4", 3)) {
+				ret = f2fs_compress_set_level(sbi, name,
+								COMPRESS_LZ4);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZ4;
-			} else if (!strcmp(name, "zstd")) {
+			} else if (!strncmp(name, "zstd", 4)) {
+				ret = f2fs_compress_set_level(sbi, name,
+								COMPRESS_ZSTD);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_ZSTD;
 			} else if (!strcmp(name, "lzo-rle")) {
@@ -1547,6 +1612,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 	}
 	seq_printf(seq, ",compress_algorithm=%s", algtype);
 
+	if (!F2FS_OPTION(sbi).compress_level)
+		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
+
 	seq_printf(seq, ",compress_log_size=%u",
 			F2FS_OPTION(sbi).compress_log_size);
 
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 55be7afeee90..2dcc63fe8494 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -275,6 +275,9 @@ struct f2fs_inode {
 			__u8 i_compress_algorithm;	/* compress algorithm */
 			__u8 i_log_cluster_size;	/* log of cluster size */
 			__le16 i_compress_flag;		/* compress flag */
+						/* 0 bit: chksum flag
+						 * [10,15] bits: compress level
+						 */
 			__le32 i_extra_end[0];	/* for attribute size calculation */
 		} __packed;
 		__le32 i_addr[DEF_ADDRS_PER_INODE];	/* Pointers to data blocks */
-- 
2.26.2



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Eric Biggers @ 2020-12-03 19:32 UTC
  To: Chao Yu; +Cc: jaegeuk, linux-kernel, linux-f2fs-devel

On Thu, Dec 03, 2020 at 02:17:15PM +0800, Chao Yu wrote:
> +config F2FS_FS_LZ4HC
> +	bool "LZ4HC compression support"
> +	depends on F2FS_FS_COMPRESSION
> +	depends on F2FS_FS_LZ4
> +	select LZ4HC_COMPRESS
> +	default y
> +	help
> +	  Support LZ4HC compress algorithm, if unsure, say Y.
> +

It would be helpful to mention that LZ4HC is on-disk compatible with LZ4.

>  static int lz4_compress_pages(struct compress_ctx *cc)
>  {
>  	int len;
>  
> +#ifdef CONFIG_F2FS_FS_LZ4HC
> +	return lz4hc_compress_pages(cc);
> +#endif

This looks wrong; it always calls lz4hc compression even for regular lz4.

>  static int parse_options(struct super_block *sb, char *options, bool is_remount)
>  {
>  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
> @@ -886,10 +939,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
>  			if (!strcmp(name, "lzo")) {
>  				F2FS_OPTION(sbi).compress_algorithm =
>  								COMPRESS_LZO;
> -			} else if (!strcmp(name, "lz4")) {
> +			} else if (!strncmp(name, "lz4", 3)) {

strcmp() is fine, no need for strncmp().

> @@ -1547,6 +1612,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
>  	}
>  	seq_printf(seq, ",compress_algorithm=%s", algtype);
>  
> +	if (!F2FS_OPTION(sbi).compress_level)
> +		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
> +

This looks wrong; it only prints compress_level if it is 0.
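
Presumably the intended check just drops the negation, i.e. (sketch):

	if (F2FS_OPTION(sbi).compress_level)
		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);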

> diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
> index 55be7afeee90..2dcc63fe8494 100644
> --- a/include/linux/f2fs_fs.h
> +++ b/include/linux/f2fs_fs.h
> @@ -275,6 +275,9 @@ struct f2fs_inode {
>  			__u8 i_compress_algorithm;	/* compress algorithm */
>  			__u8 i_log_cluster_size;	/* log of cluster size */
>  			__le16 i_compress_flag;		/* compress flag */
> +						/* 0 bit: chksum flag
> +						 * [10,15] bits: compress level
> +						 */

What is the use case for storing the compression level on-disk?

Keep in mind that compression levels are an implementation detail; the exact
compressed data that is produced by a particular algorithm at a particular
compression level is *not* a stable interface.  It can change when the
compressor is updated, as long as the output continues to be compatible with the
decompressor.
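
Concretely, the read path never needs the level. A minimal sketch with the
in-kernel lz4 API (the buffer names here are only placeholders):

	/* output produced at any LZ4/LZ4HC level decodes via the same call */
	ret = LZ4_decompress_safe(src, dst, compressed_len, dst_max);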

So does compression level really belong in the on-disk format?

- Eric


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  0:31 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

Hi Chao,

On Thu, Dec 03, 2020 at 11:32:34AM -0800, Eric Biggers wrote:

...

> 
> What is the use case for storing the compression level on-disk?
> 
> Keep in mind that compression levels are an implementation detail; the exact
> compressed data that is produced by a particular algorithm at a particular
> compression level is *not* a stable interface.  It can change when the
> compressor is updated, as long as the output continues to be compatible with the
> decompressor.
> 
> So does compression level really belong in the on-disk format?
> 

Curious about this: since f2fs compression uses a 16k f2fs compress cluster
by default (it doesn't do sub-block compression by design, as btrfs does),
is there a significant CR difference between lz4 and lz4hc in the 16k
configuration (I guess using zstd or lz4hc for a 128k cluster like btrfs
could make more sense), could you leave some CR numbers about these
algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
cluster size?

As you may have noticed, lz4hc is much slower than lz4, so if it's used online,
it's a good way to keep all CPUs busy (under writeback) with unprivileged
users. I'm not sure if it does matter. (Ok, it'll give users more options
at least, yet I'm not sure end users quite understand what these
algorithms really mean, I guess it spends more CPU time but without much
more storage saving by the default 16k configuration.)

from https://github.com/lz4/lz4    Core i7-9700K CPU @ 4.9GHz
Silesia Corpus

Compressor              Ratio   Compression     Decompression
memcpy                  1.000   13700 MB/s      13700 MB/s
Zstandard 1.4.0 -1      2.883   515 MB/s	1380 MB/s
LZ4 HC -9 (v1.9.0)      2.721   41 MB/s         4900 MB/s

Also a minor thing is lzo-rle, initially it was only used for in-memory
anonymous pages and it won't be kept on-disk so that's fine. I'm not sure
if the original lzo author wants to support it or not. It'd be better to get
some opinion before keeping it on-disk.

Thanks,
Gao Xiang

> - Eric
> 
> 



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  1:18 UTC
  To: Eric Biggers; +Cc: jaegeuk, linux-kernel, linux-f2fs-devel

On 2020/12/4 3:32, Eric Biggers wrote:
> On Thu, Dec 03, 2020 at 02:17:15PM +0800, Chao Yu wrote:
>> +config F2FS_FS_LZ4HC
>> +	bool "LZ4HC compression support"
>> +	depends on F2FS_FS_COMPRESSION
>> +	depends on F2FS_FS_LZ4
>> +	select LZ4HC_COMPRESS
>> +	default y
>> +	help
>> +	  Support LZ4HC compress algorithm, if unsure, say Y.
>> +
> 
> It would be helpful to mention that LZ4HC is on-disk compatible with LZ4.

Sure, let me update description.

> 
>>   static int lz4_compress_pages(struct compress_ctx *cc)
>>   {
>>   	int len;
>>   
>> +#ifdef CONFIG_F2FS_FS_LZ4HC
>> +	return lz4hc_compress_pages(cc);
>> +#endif
> 
> This looks wrong; it always calls lz4hc compression even for regular lz4.

It's fine. lz4hc_compress_pages() will call LZ4_compress_HC() only when
compress level is set.

> 
>>   static int parse_options(struct super_block *sb, char *options, bool is_remount)
>>   {
>>   	struct f2fs_sb_info *sbi = F2FS_SB(sb);
>> @@ -886,10 +939,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
>>   			if (!strcmp(name, "lzo")) {
>>   				F2FS_OPTION(sbi).compress_algorithm =
>>   								COMPRESS_LZO;
>> -			} else if (!strcmp(name, "lz4")) {
>> +			} else if (!strncmp(name, "lz4", 3)) {
> 
> strcmp() is fine, no need for strncmp().

Actually, I reuse "lz4" mount option parameter, to enable lz4hc algorithm, user
only need to specify compress level after "lz4" string, e.g.
compress_algorithm=lz4, f2fs use lz4 algorithm
compress_algorithm=lz4:xx, f2fs use lz4hc algorithm with compress level xx.

So, I use !strncmp(name, "lz4", 3) here.

> 
>> @@ -1547,6 +1612,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
>>   	}
>>   	seq_printf(seq, ",compress_algorithm=%s", algtype);
>>   
>> +	if (!F2FS_OPTION(sbi).compress_level)
>> +		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
>> +
> 
> This looks wrong; it only prints compress_level if it is 0.

Yes, will fix.

> 
>> diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
>> index 55be7afeee90..2dcc63fe8494 100644
>> --- a/include/linux/f2fs_fs.h
>> +++ b/include/linux/f2fs_fs.h
>> @@ -275,6 +275,9 @@ struct f2fs_inode {
>>   			__u8 i_compress_algorithm;	/* compress algorithm */
>>   			__u8 i_log_cluster_size;	/* log of cluster size */
>>   			__le16 i_compress_flag;		/* compress flag */
>> +						/* 0 bit: chksum flag
>> +						 * [10,15] bits: compress level
>> +						 */
> 
> What is the use case for storing the compression level on-disk?

One case I can imagine is if a user wants to specify a different compress level
for a different algorithm in separate files.

e.g.
mount -o compress_algorithm=zstd:10 /dev/sdc /f2fs
touch fileA;
write fileA;
mount -o remount,compress_algorithm=lz4:8 /f2fs
write fileA;
touch fileB;
write fileB;
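
For reference, a minimal sketch of how this version keeps the level per inode
(taken from the patch itself; COMPRESS_LEVEL_OFFSET is 8):

	/* store, in set_compress_context() */
	F2FS_I(inode)->i_compress_flag |=
			F2FS_OPTION(sbi).compress_level << COMPRESS_LEVEL_OFFSET;
	/* load, in the lz4/zstd init paths */
	level = F2FS_I(inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET;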

Thanks,

> 
> Keep in mind that compression levels are an implementation detail; the exact
> compressed data that is produced by a particular algorithm at a particular
> compression level is *not* a stable interface.  It can change when the
> compressor is updated, as long as the output continues to be compatible with the
> decompressor.
> 
> So does compression level really belong in the on-disk format?
> 
> - Eric
> .
> 


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  1:56 UTC
  To: Gao Xiang; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

Hi Xiang,

On 2020/12/4 8:31, Gao Xiang wrote:
> Hi Chao,
> 
> On Thu, Dec 03, 2020 at 11:32:34AM -0800, Eric Biggers wrote:
> 
> ...
> 
>>
>> What is the use case for storing the compression level on-disk?
>>
>> Keep in mind that compression levels are an implementation detail; the exact
>> compressed data that is produced by a particular algorithm at a particular
>> compression level is *not* a stable interface.  It can change when the
>> compressor is updated, as long as the output continues to be compatible with the
>> decompressor.
>>
>> So does compression level really belong in the on-disk format?
>>
> 
> Curious about this: since f2fs compression uses a 16k f2fs compress cluster
> by default (it doesn't do sub-block compression by design, as btrfs does),
> is there a significant CR difference between lz4 and lz4hc in the 16k
> configuration (I guess using zstd or lz4hc for a 128k cluster like btrfs
> could make more sense), could you leave some CR numbers about these
> algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
> cluster size?

Yup, I can figure out some numbers later. :)

> 
> As you may have noticed, lz4hc is much slower than lz4, so if it's used online,
> it's a good way to keep all CPUs busy (under writeback) with unprivileged
> users. I'm not sure if it does matter. (Ok, it'll give users more options
> at least, yet I'm not sure end users quite understand what these
> algorithms really mean, I guess it spends more CPU time but without much
> more storage saving by the default 16k configuration.)
> 
> from https://github.com/lz4/lz4    Core i7-9700K CPU @ 4.9GHz
> Silesia Corpus
> 
> Compressor              Ratio   Compression     Decompression
> memcpy                  1.000   13700 MB/s      13700 MB/s
> Zstandard 1.4.0 -1      2.883   515 MB/s	1380 MB/s
> LZ4 HC -9 (v1.9.0)      2.721   41 MB/s         4900 MB/s

There is one solution now, Daeho has submitted two patches:

f2fs: add compress_mode mount option
f2fs: add F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE

These allow all files in a data partition to be marked compressible; by default,
all files are written as non-compressed, and at the system's free time we can use
the ioctls to reload and compress the data of specific files.
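
A userspace sketch of that flow (the file path here is hypothetical; the ioctl
request names come from Daeho's series and take no argument):

	int fd = open("/f2fs/fileA", O_RDWR);

	if (fd >= 0)
		ioctl(fd, F2FS_IOC_COMPRESS_FILE);	/* compress existing data at idle time */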

> 
> Also a minor thing is lzo-rle, initially it was only used for in-memory
> anonymous pages and it won't be kept on-disk so that's fine. I'm not sure
> if the original lzo author wants to support it or not. It'd be better to get


Hmm.. that's a problem, as there may be existing users who are
already using lzo-rle; removing lzo-rle support will cause a compatibility issue...

IMO, the condition "f2fs may have persisted lzo-rle compress format data already"
may affect the author's decision about not supporting that algorithm.

> some opinion before keeping it on-disk.

Yes, I can try to ask... :)

Thanks,

> 
> Thanks,
> Gao Xiang
> 
>> - Eric
>>
>>
> 
> .
> 


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  2:06 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On Fri, Dec 04, 2020 at 09:56:27AM +0800, Chao Yu wrote:
> Hi Xiang,
> 
> On 2020/12/4 8:31, Gao Xiang wrote:
> > Hi Chao,
> > 
> > On Thu, Dec 03, 2020 at 11:32:34AM -0800, Eric Biggers wrote:
> > 
> > ...
> > 
> > > 
> > > What is the use case for storing the compression level on-disk?
> > > 
> > > Keep in mind that compression levels are an implementation detail; the exact
> > > compressed data that is produced by a particular algorithm at a particular
> > > compression level is *not* a stable interface.  It can change when the
> > > compressor is updated, as long as the output continues to be compatible with the
> > > decompressor.
> > > 
> > > So does compression level really belong in the on-disk format?
> > > 
> > 
> > Curious about this: since f2fs compression uses a 16k f2fs compress cluster
> > by default (it doesn't do sub-block compression by design, as btrfs does),
> > is there a significant CR difference between lz4 and lz4hc in the 16k
> > configuration (I guess using zstd or lz4hc for a 128k cluster like btrfs
> > could make more sense), could you leave some CR numbers about these
> > algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
> > cluster size?
> 
> Yup, I can figure out some numbers later. :)
> 
> > 
> > As you may have noticed, lz4hc is much slower than lz4, so if it's used online,
> > it's a good way to keep all CPUs busy (under writeback) with unprivileged
> > users. I'm not sure if it does matter. (Ok, it'll give users more options
> > at least, yet I'm not sure end users quite understand what these
> > algorithms really mean, I guess it spends more CPU time but without much
> > more storage saving by the default 16k configuration.)
> > 
> > from https://github.com/lz4/lz4    Core i7-9700K CPU @ 4.9GHz
> > Silesia Corpus
> > 
> > Compressor              Ratio   Compression     Decompression
> > memcpy                  1.000   13700 MB/s      13700 MB/s
> > Zstandard 1.4.0 -1      2.883   515 MB/s	1380 MB/s
> > LZ4 HC -9 (v1.9.0)      2.721   41 MB/s         4900 MB/s
> 
> There is one solution now, Daeho has submitted two patches:
> 
> f2fs: add compress_mode mount option
> f2fs: add F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE
> 
> These allow all files in a data partition to be marked compressible; by default,
> all files are written as non-compressed, and at the system's free time we can use
> the ioctls to reload and compress the data of specific files.
> 

Yeah, my own premature suggestion is that there are many compression options
in f2fs compression, but end users are not compression experts.
So it'd be better to leave the advanced options to users (or users might be
confused, select the wrong algorithm, or make potential complaints...)

Keeping lz4hc dirty data under writeback could block writeback, make kswapd
busy, and stall the direct memory reclaim path; I guess that's why online
compression rarely chooses it. My own premature suggestion is that it'd be better
to show the CR or performance benefits in advance, and to prevent unprivileged
users from using the high-level lz4hc algorithm (to avoid a potential system attack),
either from mount options or ioctl.

> > 
> > Also a minor thing is lzo-rle, initially it was only used for in-memory
> > anonymous pages and it won't be kept on-disk so that's fine. I'm not sure
> > if the original lzo author wants to support it or not. It'd be better to get
> 
> 
> Hmm.. that's a problem, as there may be existing users who are
> already using lzo-rle; removing lzo-rle support will cause a compatibility issue...
> 
> IMO, the condition "f2fs may have persisted lzo-rle compress format data already"
> may affect the author's decision about not supporting that algorithm.
> 
> > some opinion before keeping it on-disk.
> 
> Yes, I can try to ask... :)

Yeah, it'd be better to ask the author first, or it may have to maintain
a private lzo-rle fork...

Thanks,
Gao Xiang

> 
> Thanks,
> 
> > 
> > Thanks,
> > Gao Xiang
> > 
> > > - Eric
> > > 
> > > 
> > 
> > .
> > 
> 



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  2:38 UTC
  To: Gao Xiang; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On 2020/12/4 10:06, Gao Xiang wrote:
> On Fri, Dec 04, 2020 at 09:56:27AM +0800, Chao Yu wrote:
>> Hi Xiang,
>>
>> On 2020/12/4 8:31, Gao Xiang wrote:
>>> Hi Chao,
>>>
>>> On Thu, Dec 03, 2020 at 11:32:34AM -0800, Eric Biggers wrote:
>>>
>>> ...
>>>
>>>>
>>>> What is the use case for storing the compression level on-disk?
>>>>
>>>> Keep in mind that compression levels are an implementation detail; the exact
>>>> compressed data that is produced by a particular algorithm at a particular
>>>> compression level is *not* a stable interface.  It can change when the
>>>> compressor is updated, as long as the output continues to be compatible with the
>>>> decompressor.
>>>>
>>>> So does compression level really belong in the on-disk format?
>>>>
>>>
>>> Curious about this: since f2fs compression uses a 16k f2fs compress cluster
>>> by default (it doesn't do sub-block compression by design, as btrfs does),
>>> is there a significant CR difference between lz4 and lz4hc in the 16k
>>> configuration (I guess using zstd or lz4hc for a 128k cluster like btrfs
>>> could make more sense), could you leave some CR numbers about these
>>> algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
>>> cluster size?
>>
>> Yup, I can figure out some numbers later. :)
>>
>>>
>>> As you may have noticed, lz4hc is much slower than lz4, so if it's used online,
>>> it's a good way to keep all CPUs busy (under writeback) with unprivileged
>>> users. I'm not sure if it does matter. (Ok, it'll give users more options
>>> at least, yet I'm not sure end users quite understand what these
>>> algorithms really mean, I guess it spends more CPU time but without much
>>> more storage saving by the default 16k configuration.)
>>>
>>> from https://github.com/lz4/lz4    Core i7-9700K CPU @ 4.9GHz
>>> Silesia Corpus
>>>
>>> Compressor              Ratio   Compression     Decompression
>>> memcpy                  1.000   13700 MB/s      13700 MB/s
>>> Zstandard 1.4.0 -1      2.883   515 MB/s	1380 MB/s
>>> LZ4 HC -9 (v1.9.0)      2.721   41 MB/s         4900 MB/s
>>
>> There is one solution now, Daeho has submitted two patches:
>>
>> f2fs: add compress_mode mount option
>> f2fs: add F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE
>>
>> These allow all files in a data partition to be marked compressible; by default,
>> all files are written as non-compressed, and at the system's free time we can use
>> the ioctls to reload and compress the data of specific files.
>>
> 
> Yeah, my own premature suggestion is that there are many compression options
> in f2fs compression, but end users are not compression experts.
> So it'd be better to leave the advanced options to users (or users might be
> confused, select the wrong algorithm, or make potential complaints...)

Yes, I agree.

> 
> Keeping lz4hc dirty data under writeback could block writeback, make kswapd
> busy, and stall the direct memory reclaim path; I guess that's why online
> compression rarely chooses it. My own premature suggestion is that it'd be better
> to show the CR or performance benefits in advance, and to prevent unprivileged
> users from using the high-level lz4hc algorithm (to avoid a potential system attack),
> either from mount options or ioctl.

Yes, I guess you are worried about the desktop/server scenario; as for the android scenario,
all compression related flow can be customized, and I don't think we will use
online lz4hc compression; for other scenarios, besides the numbers, I need to add the
risk of using the lz4hc algorithm to the document.

Thanks,

> 
>>>
>>> Also a minor thing is lzo-rle, initially it was only used for in-memory
>>> anonymous pages and it won't be kept on-disk so that's fine. I'm not sure
>>> if the original lzo author wants to support it or not. It'd be better to get
>>
>>
>> Hmm.. that's a problem, as there may be existing users who are
>> already using lzo-rle; removing lzo-rle support will cause a compatibility issue...
>>
>> IMO, the condition "f2fs may have persisted lzo-rle compress format data already"
>> may affect the author's decision about not supporting that algorithm.
>>
>>> some opinion before keeping it on-disk.
>>
>> Yes, I can try to ask... :)
> 
> Yeah, it'd be better to ask the author first, or it may have to maintain
> a private lzo-rle fork...
> 
> Thanks,
> Gao Xiang
> 
>>
>> Thanks,
>>
>>>
>>> Thanks,
>>> Gao Xiang
>>>
>>>> - Eric
>>>>
>>>>
>>>
>>> .
>>>
>>
> 
> .
> 


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  2:47 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On Fri, Dec 04, 2020 at 10:38:08AM +0800, Chao Yu wrote:
> On 2020/12/4 10:06, Gao Xiang wrote:
> > On Fri, Dec 04, 2020 at 09:56:27AM +0800, Chao Yu wrote:

...

> 
> > 
> > Keeping lz4hc dirty data under writeback could block writeback, make kswapd
> > busy, and stall the direct memory reclaim path; I guess that's why online
> > compression rarely chooses it. My own premature suggestion is that it'd be better
> > to show the CR or performance benefits in advance, and to prevent unprivileged
> > users from using the high-level lz4hc algorithm (to avoid a potential system attack),
> > either from mount options or ioctl.
> 
> Yes, I guess you are worried about the desktop/server scenario; as for the android scenario,
> all compression related flow can be customized, and I don't think we will use
> online lz4hc compression; for other scenarios, besides the numbers, I need to add the
> risk of using the lz4hc algorithm to the document.

Yes, I was talking about the general scenario. My overall premature thought is that
before releasing some brand new algorithm, it may be better to evaluate
whether it'd benefit some scenarios first (either on the CR or performance side, or
why add this?), or it might cause an lzo-rle-like situation in the
future (and add more dependencies on algorithms; you might see the BWT-based bzip2
removal patch
https://lore.kernel.org/r/20201117223253.65920-1-alex_y_xu@yahoo.ca
(since I personally don't think BWT is a good algorithm either)... Just FYI
... If I'm wrong, kindly ignore me :)

Thanks,
Gao Xiang

> 
> Thanks,



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  3:11 UTC
  To: Gao Xiang; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On 2020/12/4 10:47, Gao Xiang wrote:
> On Fri, Dec 04, 2020 at 10:38:08AM +0800, Chao Yu wrote:
>> On 2020/12/4 10:06, Gao Xiang wrote:
>>> On Fri, Dec 04, 2020 at 09:56:27AM +0800, Chao Yu wrote:
> 
> ...
> 
>>
>>>
>>> Keeping lz4hc dirty data under writeback could block writeback, make kswapd
>>> busy, and stall the direct memory reclaim path; I guess that's why online
>>> compression rarely chooses it. My own premature suggestion is that it'd be better
>>> to show the CR or performance benefits in advance, and to prevent unprivileged
>>> users from using the high-level lz4hc algorithm (to avoid a potential system attack),
>>> either from mount options or ioctl.
>>
>> Yes, I guess you are worried about the desktop/server scenario; as for the android scenario,
>> all compression related flow can be customized, and I don't think we will use
>> online lz4hc compression; for other scenarios, besides the numbers, I need to add the
>> risk of using the lz4hc algorithm to the document.
> 
> Yes, I was talking about the general scenario. My overall premature thought is that
> before releasing some brand new algorithm, it may be better to evaluate
> whether it'd benefit some scenarios first (either on the CR or performance side, or
> why add this?), or it might cause an lzo-rle-like situation in the

Yeah, got your point.

> future (and add more dependencies on algorithms; you might see the BWT-based bzip2
> removal patch

Oops, is that really allowed? I don't think this is a good idea... and I don't see that there
are deletions from fs/ for a similar reason...

Thanks,

> https://lore.kernel.org/r/20201117223253.65920-1-alex_y_xu@yahoo.ca
> (since I personally don't think BWT is a good algorithm either)... Just FYI
> ... If I'm wrong, kindly ignore me :)
> 
> Thanks,
> Gao Xiang
> 
>>
>> Thanks,
> 
> .
> 


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  3:21 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On Fri, Dec 04, 2020 at 11:11:03AM +0800, Chao Yu wrote:
> On 2020/12/4 10:47, Gao Xiang wrote:

...

> 
> > future (and add more dependencies on algorithms; you might see the BWT-based bzip2
> > removal patch
> 
> Oops, is that really allowed? I don't think this is a good idea... and I don't see that there
> are deletions from fs/ for a similar reason...

Fortunately, bzip2 is quite a slow algorithm, so it's not attractive at all.

My personal opinion, based on compress algorithm principles (just for reference
as well...):
 - zlib would better be replaced with libdeflate if possible; the main point
   is that many hardware accelerators for deflate (LZ77 + Huffman) are
   available;

 - lzo is not attractive given its format complexity and its CR/performance
   goals, so lz4 is generally better due to its format design;

 - lzo-rle, oops, was just introduced for zram I think; I'm not sure it's quite
   helpful for file data (since anonymous pages are generally RLE-effective due
   to much repetitive data). But it'd be good if the lzo author accepts it.

Thanks,
Gao Xiang

> 
> Thanks,
> > 
> 



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  7:09 UTC
  To: Gao Xiang; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On 2020/12/4 8:31, Gao Xiang wrote:
> could make more sense), could you leave some CR numbers about these
> algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
> cluster size?

Just from a quick test with enwik9 on vm:

Original blocks:	244382

			lz4			lz4hc-9
compressed blocks	170647			163270
compress ratio		69.8%			66.8%
speed			16.4207 s, 60.9 MB/s	26.7299 s, 37.4 MB/s

compress ratio = after / before
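
(As a quick sanity check of the table: 170647 / 244382 ≈ 0.698 and
163270 / 244382 ≈ 0.668, matching the two ratios above.)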

Thanks,


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  7:43 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

Hi Chao,

On Fri, Dec 04, 2020 at 03:09:20PM +0800, Chao Yu wrote:
> On 2020/12/4 8:31, Gao Xiang wrote:
> > could make more sense), could you leave some CR numbers about these
> > algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
> > cluster size?
> 
> Just from a quick test with enwik9 on vm:
> 
> Original blocks:	244382
> 
> 			lz4			lz4hc-9
> compressed blocks	170647			163270
> compress ratio		69.8%			66.8%
> speed			16.4207 s, 60.9 MB/s	26.7299 s, 37.4 MB/s
> 
> compress ratio = after / before

Thanks for the confirmation. It'd be better to add this to the commit message
if needed when adding a new algorithm, to show the benefits.

About the speed, I think it is also limited by the storage device and other
conditions (I mean the CPU loading during the writeback might be different
between lz4 and lz4hc-9 due to many other bounds, e.g. UFS 3.0 seq
write is somewhat higher vs VM. lz4 may have higher bandwidth on high
level devices since it seems some IO bound here... I guess but not sure,
since pure in-memory lz4 is fast according to lzbench / lz4 homepage.)

Anyway, it's up to f2fs folks if it's useful :) (the CR number is what
I expect though... I'm a bit afraid of the CPU runtime loading.)
Thanks for your time!

Thanks,
Gao Xiang

> 
> Thanks,
> 



* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Chao Yu @ 2020-12-04  8:50 UTC
  To: Gao Xiang; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

Hi Xiang,

On 2020/12/4 15:43, Gao Xiang wrote:
> Hi Chao,
> 
> On Fri, Dec 04, 2020 at 03:09:20PM +0800, Chao Yu wrote:
>> On 2020/12/4 8:31, Gao Xiang wrote:
>>> could make more sense), could you leave some CR numbers about these
>>> algorithms on typical datasets (enwik9, silesia.tar, etc.) with 16k
>>> cluster size?
>>
>> Just from a quick test with enwik9 on vm:
>>
>> Original blocks:	244382
>>
>> 			lz4			lz4hc-9
>> compressed blocks	170647			163270
>> compress ratio		69.8%			66.8%
>> speed			16.4207 s, 60.9 MB/s	26.7299 s, 37.4 MB/s
>>
>> compress ratio = after / before
> 
> Thanks for the confirmation. It'd be better to add this to the commit message
> if needed when adding a new algorithm, to show the benefits.

Sure, will add this.

> 
> About the speed, I think it is also limited by the storage device and other
> conditions (I mean the CPU loading during the writeback might be different
> between lz4 and lz4hc-9 due to many other bounds, e.g. UFS 3.0 seq
> write is somewhat higher vs VM. lz4 may have higher bandwidth on high

Yeah, I guess my VM has been limited by its storage bandwidth, and its back-end
could be a low-end rotating disk...

> level devices since it seems some IO bound here... I guess but not sure,
> since pure in-memory lz4 is fast according to lzbench / lz4 homepage.)
> 
> Anyway, it's up to f2fs folks if it's useful :) (the CR number is what
> I expect though... I'm a bit afraid of the CPU runtime loading.)

I just had a glance at the CPU usage numbers (my VM has 16 cores):
lz4hc takes 11% in the first half and drops to 6% in the second half.
lz4 takes 6% in the whole process.

But that's not accurate...

Thanks,

> Thanks for your time!
> 
> Thanks,
> Gao Xiang
> 
>>
>> Thanks,
>>
> 
> .
> 


* Re: [f2fs-dev] [PATCH v6] f2fs: compress: support compress level
From: Gao Xiang @ 2020-12-04  9:10 UTC
  To: Chao Yu; +Cc: Eric Biggers, jaegeuk, linux-kernel, linux-f2fs-devel

On Fri, Dec 04, 2020 at 04:50:14PM +0800, Chao Yu wrote:

...

> 
> > 
> > About the speed, I think it is also limited by the storage device and other
> > conditions (I mean the CPU loading during the writeback might be different
> > between lz4 and lz4hc-9 due to many other bounds, e.g. UFS 3.0 seq
> > write is somewhat higher vs VM. lz4 may have higher bandwidth on high
> 
> Yeah, I guess my VM has been limited by its storage bandwidth, and its back-end
> could be a low-end rotating disk...

Yeah, anyway that's in the IO writeback path (no matter whether the time was
spent on IO or CPU calculation...)

> 
> > level devices since it seems some IO bound here... I guess but not sure,
> > since pure in-memory lz4 is fast according to lzbench / lz4 homepage.)
> > 
> > Anyway, it's up to f2fs folks if it's useful :) (the CR number is what
> > I expect though... I'm a bit afraid of the CPU runtime loading.)
> 
> I just had a glance at the CPU usage numbers (my VM has 16 cores):
> lz4hc takes 11% in the first half and drops to 6% in the second half.
> lz4 takes 6% in the whole process.
> 
> But that's not accurate...

There is a userspace tool, lzbench [1], to benchmark lz4/lz4hc completely
in memory. So it's expected that lzbench will consume 100% CPU
with maximum bandwidth (though the in-kernel lz4 version is lower):

Intel Core i7-8700K
                Compression     Decompression   C/R
memcpy          10362 MB/s      10790 MB/s      100.00
lz4 1.9.2       737 MB/s        4448 MB/s       47.60
lz4hc 1.9.2 -9  33 MB/s         4378 MB/s       36.75

So adding more IO time (due to storage device differences) could make the
CPU loading lower (and could also make the whole process IO bound), but
the overall write bandwidth will be lower as well.

[1] https://github.com/inikep/lzbench

Thanks,
Gao Xiang

> 
> Thanks,


