From: Chao Yu <yuchao0@huawei.com>
To: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: <linux-f2fs-devel@lists.sourceforge.net>,
	<linux-kernel@vger.kernel.org>, <chao@kernel.org>
Subject: Re: [RFC PATCH v5] f2fs: support data compression
Date: Fri, 3 Jan 2020 14:50:51 +0800
Message-ID: <d51f0325-6879-9aa6-f549-133b96e3eef5@huawei.com>
In-Reply-To: <20200102190003.GA7597@jaegeuk-macbookpro.roam.corp.google.com>

On 2020/1/3 3:00, Jaegeuk Kim wrote:
> On 01/02, Jaegeuk Kim wrote:
>> On 12/31, Chao Yu wrote:
>>> On 2019/12/31 8:46, Jaegeuk Kim wrote:
>>>> On 12/23, Chao Yu wrote:
>>>>> Hi Jaegeuk,
>>>>>
>>>>> Sorry for the delay.
>>>>>
>>>>> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>>>>>> Hi Chao,
>>>>>>
>>>>>> I still see some diffs from my latest testing version, so please check for
>>>>>> anything that you added on top of this:
>>>>>>
>>>>>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>>>>>
>>>>> I've checked the diff and picked up the valid parts; could you please check
>>>>> them and comment?
>>>>>
>>>>> ---
>>>>>  fs/f2fs/compress.c |  8 ++++----
>>>>>  fs/f2fs/data.c     | 18 +++++++++++++++---
>>>>>  fs/f2fs/f2fs.h     |  3 +++
>>>>>  fs/f2fs/file.c     |  1 -
>>>>>  4 files changed, 22 insertions(+), 8 deletions(-)
>>>>>
>>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>>> index af23ed6deffd..1bc86a54ad71 100644
>>>>> --- a/fs/f2fs/compress.c
>>>>> +++ b/fs/f2fs/compress.c
>>>>> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>>  							fgp_flag, GFP_NOFS);
>>>>>  		if (!page) {
>>>>>  			ret = -ENOMEM;
>>>>> -			goto unlock_pages;
>>>>> +			goto release_pages;
>>>>>  		}
>>>>>
>>>>>  		if (PageUptodate(page))
>>>>> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>>  		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>>>>>  						&last_block_in_bio, false);
>>>>>  		if (ret)
>>>>> -			goto release_pages;
>>>>> +			goto unlock_pages;
>>>>>  		if (bio)
>>>>>  			f2fs_submit_bio(sbi, bio, DATA);
>>>>>
>>>>>  		ret = f2fs_init_compress_ctx(cc);
>>>>>  		if (ret)
>>>>> -			goto release_pages;
>>>>> +			goto unlock_pages;
>>>>>  	}
>>>>>
>>>>>  	for (i = 0; i < cc->cluster_size; i++) {
>>>>> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>>>  	if (err)
>>>>>  		goto out_unlock_op;
>>>>>
>>>>> -	psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>>> +	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>>>
>>>>>  	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>>>>>  	if (err)
>>>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>>>> index 19cd03450066..f1f5c701228d 100644
>>>>> --- a/fs/f2fs/data.c
>>>>> +++ b/fs/f2fs/data.c
>>>>> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>>>>>  }
>>>>>
>>>>>  #ifdef CONFIG_F2FS_FS_COMPRESSION
>>>>> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
>>>>> +{
>>>>> +	f2fs_decompress_end_io(rpages, cluster_size, false, true);
>>>>> +}
>>>>> +
>>>>>  static void f2fs_verify_bio(struct bio *bio)
>>>>>  {
>>>>>  	struct page *page = bio_first_page_all(bio);
>>>>>  	struct decompress_io_ctx *dic =
>>>>>  			(struct decompress_io_ctx *)page_private(page);
>>>>>
>>>>> -	f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
>>>>> +	f2fs_verify_pages(dic->rpages, dic->cluster_size);
>>>>>  	f2fs_free_dic(dic);
>>>>>  }
>>>>>  #endif
>>>>> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>>>>>  	bio_for_each_segment_all(bvec, bio, iter_all) {
>>>>>  		struct page *target = bvec->bv_page;
>>>>>
>>>>> -		if (fscrypt_is_bounce_page(target))
>>>>> +		if (fscrypt_is_bounce_page(target)) {
>>>>>  			target = fscrypt_pagecache_page(target);
>>>>> -		if (f2fs_is_compressed_page(target))
>>>>> +			if (IS_ERR(target))
>>>>> +				continue;
>>>>> +		}
>>>>> +		if (f2fs_is_compressed_page(target)) {
>>>>>  			target = f2fs_compress_control_page(target);
>>>>> +			if (IS_ERR(target))
>>>>> +				continue;
>>>>> +		}
>>>>>
>>>>>  		if (inode && inode == target->mapping->host)
>>>>>  			return true;
>>>>> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>>>  	if (ret)
>>>>>  		goto out;
>>>>>
>>>>> +	/* cluster was overwritten as normal cluster */
>>>>>  	if (dn.data_blkaddr != COMPRESS_ADDR)
>>>>>  		goto out;
>>>>>
>>>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>>>> index 5d55cef66410..17d2af4eeafb 100644
>>>>> --- a/fs/f2fs/f2fs.h
>>>>> +++ b/fs/f2fs/f2fs.h
>>>>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>>>>  			1 << F2FS_I(inode)->i_log_cluster_size;
>>>>>  	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>>>>  	set_inode_flag(inode, FI_COMPRESSED_FILE);
>>>>> +	stat_inc_compr_inode(inode);
>>>>>  }
>>>>>
>>>>>  static inline unsigned int addrs_per_inode(struct inode *inode)
>>>>> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>>>>>  		return true;
>>>>>  	if (f2fs_is_multi_device(sbi))
>>>>>  		return true;
>>>>> +	if (f2fs_compressed_file(inode))
>>>>> +		return true;
>>>>>  	/*
>>>>>  	 * for blkzoned device, fallback direct IO to buffered IO, so
>>>>>  	 * all IOs can be serialized by log-structured write.
>>>>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>>>>> index bde5612f37f5..9aeadf14413c 100644
>>>>> --- a/fs/f2fs/file.c
>>>>> +++ b/fs/f2fs/file.c
>>>>> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>>>>>  				return -EINVAL;
>>>>>
>>>>>  			set_compress_context(inode);
>>>>> -			stat_inc_compr_inode(inode);
>>>>
>>>> As this breaks the count, I'll keep it as is.
>>>
>>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>>  			1 << F2FS_I(inode)->i_log_cluster_size;
>>>  	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>>  	set_inode_flag(inode, FI_COMPRESSED_FILE);
>>> +	stat_inc_compr_inode(inode);
>>>
>>> If I'm not missing anything, stat_inc_compr_inode() should be called inside
>>> set_compress_context(), where we convert a normal inode to a compressed one,
>>> right?
>>
>> I don't care much whether that's right or not. If we want to do that, I found
>> another line to remove in f2fs_create(). Let me give it a try.
>>
>> Thanks,
>>
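To restate the counting hazard behind this exchange: once the increment
moves into set_compress_context(), any caller that still calls
stat_inc_compr_inode() on its own counts the same inode twice. Below is
a minimal user-space sketch of that hazard -- illustrative only, not
f2fs code; the simplified helpers and the main() call sites are
stand-ins for the real helpers and callers:

#include <stdio.h>

static int compr_inode_count;

static void stat_inc_compr_inode(void)
{
	compr_inode_count++;
}

static void set_compress_context(void)
{
	/* ... set algorithm, cluster size, FI_COMPRESSED_FILE ... */
	stat_inc_compr_inode();	/* increment now folded in here */
}

int main(void)
{
	/* a caller converting a normal inode to a compressed one */
	set_compress_context();		/* counted once, correctly */
	stat_inc_compr_inode();		/* stale call site: counted again */

	printf("count = %d, expected 1\n", compr_inode_count);
	return 0;
}

This is why folding the increment into the helper also requires deleting
the bare calls in f2fs_setflags_common() and f2fs_create(), as the diff
below does.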
> 
> This works for me. Could you run fsstress tests on a compressed root directory?
> It seems there are still some bugs.

I applied the below diff, reverted ("f2fs: cover f2fs_lock_op in expand_inode_data case"),
and then started running some tests on it.

Thanks,

> 
> ---
>  fs/f2fs/compress.c | 14 ++++++++++----
>  fs/f2fs/data.c     | 25 ++++++++++++++++++++++---
>  fs/f2fs/f2fs.h     | 31 +++++++++++++++++--------------
>  fs/f2fs/file.c     |  1 -
>  fs/f2fs/namei.c    |  1 -
>  5 files changed, 49 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index af23ed6deffd..fa67ffd9d79d 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>  							fgp_flag, GFP_NOFS);
>  		if (!page) {
>  			ret = -ENOMEM;
> -			goto unlock_pages;
> +			goto release_pages;
>  		}
>  
>  		if (PageUptodate(page))
> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>  		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>  						&last_block_in_bio, false);
>  		if (ret)
> -			goto release_pages;
> +			goto unlock_pages;
>  		if (bio)
>  			f2fs_submit_bio(sbi, bio, DATA);
>  
>  		ret = f2fs_init_compress_ctx(cc);
>  		if (ret)
> -			goto release_pages;
> +			goto unlock_pages;
>  	}
>  
>  	for (i = 0; i < cc->cluster_size; i++) {
> @@ -762,7 +762,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>  	if (err)
>  		goto out_unlock_op;
>  
> -	psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> +	for (i = 0; i < cc->cluster_size; i++) {
> +		if (datablock_addr(dn.inode, dn.node_page,
> +					dn.ofs_in_node + i) == NULL_ADDR)
> +			goto out_put_dnode;
> +	}
> +
> +	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>  
>  	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>  	if (err)
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 356642e8c3b3..5476d33f2d76 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>  }
>  
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> +{
> +	f2fs_decompress_end_io(rpages, cluster_size, false, true);
> +}
> +
>  static void f2fs_verify_bio(struct bio *bio)
>  {
>  	struct page *page = bio_first_page_all(bio);
>  	struct decompress_io_ctx *dic =
>  			(struct decompress_io_ctx *)page_private(page);
>  
> -	f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> +	f2fs_verify_pages(dic->rpages, dic->cluster_size);
>  	f2fs_free_dic(dic);
>  }
>  #endif
> @@ -520,10 +525,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>  	bio_for_each_segment_all(bvec, bio, iter_all) {
>  		struct page *target = bvec->bv_page;
>  
> -		if (fscrypt_is_bounce_page(target))
> +		if (fscrypt_is_bounce_page(target)) {
>  			target = fscrypt_pagecache_page(target);
> -		if (f2fs_is_compressed_page(target))
> +			if (IS_ERR(target))
> +				continue;
> +		}
> +		if (f2fs_is_compressed_page(target)) {
>  			target = f2fs_compress_control_page(target);
> +			if (IS_ERR(target))
> +				continue;
> +		}
>  
>  		if (inode && inode == target->mapping->host)
>  			return true;
> @@ -2049,6 +2060,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>  	if (ret)
>  		goto out;
>  
> +	/* cluster was overwritten as normal cluster */
>  	if (dn.data_blkaddr != COMPRESS_ADDR)
>  		goto out;
>  
> @@ -2694,12 +2706,16 @@ static int f2fs_write_data_page(struct page *page,
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
>  	struct inode *inode = page->mapping->host;
>  
> +	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
> +		goto out;
> +
>  	if (f2fs_compressed_file(inode)) {
>  		if (f2fs_is_compressed_cluster(inode, page->index)) {
>  			redirty_page_for_writepage(wbc, page);
>  			return AOP_WRITEPAGE_ACTIVATE;
>  		}
>  	}
> +out:
>  #endif
>  
>  	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
> @@ -2809,6 +2825,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  					goto result;
>  				}
>  
> +				if (unlikely(f2fs_cp_error(sbi)))
> +					goto lock_page;
> +
>  				if (f2fs_cluster_is_empty(&cc)) {
>  					void *fsdata = NULL;
>  					struct page *pagep;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index de494fc9d596..a95369e32876 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2707,20 +2707,6 @@ static inline int f2fs_compressed_file(struct inode *inode)
>  		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
>  }
>  
> -static inline void set_compress_context(struct inode *inode)
> -{
> -	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> -
> -	F2FS_I(inode)->i_compress_algorithm =
> -			F2FS_OPTION(sbi).compress_algorithm;
> -	F2FS_I(inode)->i_log_cluster_size =
> -			F2FS_OPTION(sbi).compress_log_size;
> -	F2FS_I(inode)->i_cluster_size =
> -			1 << F2FS_I(inode)->i_log_cluster_size;
> -	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> -	set_inode_flag(inode, FI_COMPRESSED_FILE);
> -}
> -
>  static inline unsigned int addrs_per_inode(struct inode *inode)
>  {
>  	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
> @@ -3808,6 +3794,21 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
>  }
>  #endif
>  
> +static inline void set_compress_context(struct inode *inode)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +
> +	F2FS_I(inode)->i_compress_algorithm =
> +			F2FS_OPTION(sbi).compress_algorithm;
> +	F2FS_I(inode)->i_log_cluster_size =
> +			F2FS_OPTION(sbi).compress_log_size;
> +	F2FS_I(inode)->i_cluster_size =
> +			1 << F2FS_I(inode)->i_log_cluster_size;
> +	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> +	set_inode_flag(inode, FI_COMPRESSED_FILE);
> +	stat_inc_compr_inode(inode);
> +}
> +
>  static inline u64 f2fs_disable_compressed_file(struct inode *inode)
>  {
>  	struct f2fs_inode_info *fi = F2FS_I(inode);
> @@ -3963,6 +3964,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>  		return true;
>  	if (f2fs_is_multi_device(sbi))
>  		return true;
> +	if (f2fs_compressed_file(inode))
> +		return true;
>  	/*
>  	 * for blkzoned device, fallback direct IO to buffered IO, so
>  	 * all IOs can be serialized by log-structured write.
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index f66c4cd067f5..cd84b3d9aa17 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1831,7 +1831,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>  				return -EINVAL;
>  
>  			set_compress_context(inode);
> -			stat_inc_compr_inode(inode);
>  		}
>  	}
>  	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
> diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
> index cf3a286106ed..2aa035422c0f 100644
> --- a/fs/f2fs/namei.c
> +++ b/fs/f2fs/namei.c
> @@ -348,7 +348,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
>  		goto out;
>  	f2fs_unlock_op(sbi);
>  
> -	stat_inc_compr_inode(inode);
>  	f2fs_alloc_nid_done(sbi, ino);
>  
>  	d_instantiate_new(dentry, inode);
> 
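The IS_ERR() checks added to __has_merged_page() in both diffs follow a
standard kernel idiom: as the new checks imply, helpers such as
f2fs_compress_control_page() can hand back an ERR_PTR()-encoded errno
instead of a valid page pointer, and dereferencing one (target->mapping
just below the checks) would crash. A minimal user-space sketch of the
idiom -- the ERR_PTR()/IS_ERR() stand-ins mirror the kernel's encoding
of errno values in the top 4095 addresses, and lookup_page() is a
hypothetical helper, not a real f2fs function:

#include <stdio.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)
{
	return (void *)error;	/* errno lives in the top 4095 addresses */
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_page(int fail)
{
	static int page;	/* stand-in for a struct page */

	return fail ? ERR_PTR(-22) /* i.e. -EINVAL */ : &page;
}

int main(void)
{
	void *target = lookup_page(1);

	if (IS_ERR(target)) {		/* the check the diffs add */
		printf("error pointer, skip this bvec\n");
		return 0;
	}
	/* safe to dereference target here */
	return 0;
}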

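A closing note on a fix that appears in both diffs above: the (loff_t)
cast when computing psize in f2fs_write_compressed_pages(). A page
index (pgoff_t) is an unsigned long, which is 32 bits wide on 32-bit
kernels, so (index + 1) << PAGE_SHIFT is evaluated in 32-bit arithmetic
and silently truncates for pages at or beyond the 4 GiB file offset;
widening to loff_t before the shift preserves the result. A minimal
user-space sketch of the truncation -- illustrative only, with uint32_t
standing in for a 32-bit pgoff_t and 12 for a typical PAGE_SHIFT:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 0x100000 - 1;	/* last page below the 4 GiB offset */
	int64_t bad, good;

	bad  = (index + 1) << 12;		/* 32-bit shift wraps to 0 */
	good = (int64_t)(index + 1) << 12;	/* widened first: 4 GiB */

	printf("bad = %lld, good = %lld\n",
	       (long long)bad, (long long)good);
	return 0;
}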