From: Chao Yu <yuchao0@huawei.com>
To: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: <linux-f2fs-devel@lists.sourceforge.net>,
	<linux-kernel@vger.kernel.org>, <chao@kernel.org>
Subject: Re: [RFC PATCH v5] f2fs: support data compression
Date: Tue, 31 Dec 2019 09:45:51 +0800	[thread overview]
Message-ID: <7a579223-39d4-7e51-c361-4aa592b2500d@huawei.com> (raw)
In-Reply-To: <20191231004633.GA85441@jaegeuk-macbookpro.roam.corp.google.com>

On 2019/12/31 8:46, Jaegeuk Kim wrote:
> On 12/23, Chao Yu wrote:
>> Hi Jaegeuk,
>>
>> Sorry for the delay.
>>
>> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>>> Hi Chao,
>>>
>>> I still see some diffs from my latest testing version, so please check anything
>>> you added beyond what is here.
>>>
>>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>>
>> I've checked the diff and picked up the valid parts; could you please check and
>> comment on it?
>>
>> ---
>>  fs/f2fs/compress.c |  8 ++++----
>>  fs/f2fs/data.c     | 18 +++++++++++++++---
>>  fs/f2fs/f2fs.h     |  3 +++
>>  fs/f2fs/file.c     |  1 -
>>  4 files changed, 22 insertions(+), 8 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index af23ed6deffd..1bc86a54ad71 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>  							fgp_flag, GFP_NOFS);
>>  		if (!page) {
>>  			ret = -ENOMEM;
>> -			goto unlock_pages;
>> +			goto release_pages;
>>  		}
>>
>>  		if (PageUptodate(page))
>> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>  		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>>  						&last_block_in_bio, false);
>>  		if (ret)
>> -			goto release_pages;
>> +			goto unlock_pages;
>>  		if (bio)
>>  			f2fs_submit_bio(sbi, bio, DATA);
>>
>>  		ret = f2fs_init_compress_ctx(cc);
>>  		if (ret)
>> -			goto release_pages;
>> +			goto unlock_pages;
>>  	}
>>
>>  	for (i = 0; i < cc->cluster_size; i++) {
>> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>  	if (err)
>>  		goto out_unlock_op;
>>
>> -	psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>> +	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>
>>  	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>>  	if (err)
>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>> index 19cd03450066..f1f5c701228d 100644
>> --- a/fs/f2fs/data.c
>> +++ b/fs/f2fs/data.c
>> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>>  }
>>
>>  #ifdef CONFIG_F2FS_FS_COMPRESSION
>> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
>> +{
>> +	f2fs_decompress_end_io(rpages, cluster_size, false, true);
>> +}
>> +
>>  static void f2fs_verify_bio(struct bio *bio)
>>  {
>>  	struct page *page = bio_first_page_all(bio);
>>  	struct decompress_io_ctx *dic =
>>  			(struct decompress_io_ctx *)page_private(page);
>>
>> -	f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
>> +	f2fs_verify_pages(dic->rpages, dic->cluster_size);
>>  	f2fs_free_dic(dic);
>>  }
>>  #endif
>> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>>  	bio_for_each_segment_all(bvec, bio, iter_all) {
>>  		struct page *target = bvec->bv_page;
>>
>> -		if (fscrypt_is_bounce_page(target))
>> +		if (fscrypt_is_bounce_page(target)) {
>>  			target = fscrypt_pagecache_page(target);
>> -		if (f2fs_is_compressed_page(target))
>> +			if (IS_ERR(target))
>> +				continue;
>> +		}
>> +		if (f2fs_is_compressed_page(target)) {
>>  			target = f2fs_compress_control_page(target);
>> +			if (IS_ERR(target))
>> +				continue;
>> +		}
>>
>>  		if (inode && inode == target->mapping->host)
>>  			return true;
>> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>  	if (ret)
>>  		goto out;
>>
>> +	/* cluster was overwritten as normal cluster */
>>  	if (dn.data_blkaddr != COMPRESS_ADDR)
>>  		goto out;
>>
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index 5d55cef66410..17d2af4eeafb 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>  			1 << F2FS_I(inode)->i_log_cluster_size;
>>  	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>  	set_inode_flag(inode, FI_COMPRESSED_FILE);
>> +	stat_inc_compr_inode(inode);
>>  }
>>
>>  static inline unsigned int addrs_per_inode(struct inode *inode)
>> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>>  		return true;
>>  	if (f2fs_is_multi_device(sbi))
>>  		return true;
>> +	if (f2fs_compressed_file(inode))
>> +		return true;
>>  	/*
>>  	 * for blkzoned device, fallback direct IO to buffered IO, so
>>  	 * all IOs can be serialized by log-structured write.
>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>> index bde5612f37f5..9aeadf14413c 100644
>> --- a/fs/f2fs/file.c
>> +++ b/fs/f2fs/file.c
>> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>>  				return -EINVAL;
>>
>>  			set_compress_context(inode);
>> -			stat_inc_compr_inode(inode);
> 
> As this breaks the count, I'll keep it as is.

@@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
 			1 << F2FS_I(inode)->i_log_cluster_size;
 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
 	set_inode_flag(inode, FI_COMPRESSED_FILE);
+	stat_inc_compr_inode(inode);

If I'm not missing anything, stat_inc_compr_inode() should be called inside
set_compress_context(), where we convert a normal inode to a compressed one,
right?
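
i.e., the end result would look something like this (just a sketch of the
idea, with the separate stat_inc_compr_inode() call in f2fs_setflags_common()
dropped):

	static inline void set_compress_context(struct inode *inode)
	{
		/* ... cluster/option setup as before ... */
		F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
		set_inode_flag(inode, FI_COMPRESSED_FILE);
		/* account the inode once, at conversion time */
		stat_inc_compr_inode(inode);
	}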

Thanks,

> 
> Thanks,
> 
>>  		}
>>  	}
>>  	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
>> -- 
>> 2.18.0.rc1
>>
>> Thanks,
> .
> 
