* [PATCH v2] f2fs: compress: remove unneed check condition
@ 2021-04-27 3:07 ` Chao Yu
0 siblings, 0 replies; 6+ messages in thread
From: Chao Yu @ 2021-04-27 3:07 UTC (permalink / raw)
To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu
This patch makes the following changes:
- remove unneeded check condition in __cluster_may_compress()
- rename __cluster_may_compress() to cluster_has_invalid_data() for
better readability
- add cp_error check in f2fs_write_compressed_pages() like we did
in f2fs_write_single_data_page()
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
v2:
- rename function for better readability
- add cp_error check in f2fs_write_compressed_pages()
fs/f2fs/compress.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 6e46a00c1930..53f78befed8f 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-static bool __cluster_may_compress(struct compress_ctx *cc)
+static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
loff_t i_size = i_size_read(cc->inode);
unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
int i;
@@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
- f2fs_bug_on(sbi, !page);
-
- if (unlikely(f2fs_cp_error(sbi)))
- return false;
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
- return false;
+ f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
if (page->index >= nr_pages)
- return false;
+ return true;
}
- return true;
+ return false;
}
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
@@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
return false;
if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
return false;
- return __cluster_may_compress(cc);
+ return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
@@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
+ /* we should bypass data pages to proceed the kworkder jobs */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ goto out_free;
+ }
+
if (IS_NOQUOTA(inode)) {
/*
* We need to wait for node_write to avoid block allocation during
--
2.29.2
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [f2fs-dev] [PATCH v2] f2fs: compress: remove unneed check condition
@ 2021-04-27 3:07 ` Chao Yu
0 siblings, 0 replies; 6+ messages in thread
From: Chao Yu @ 2021-04-27 3:07 UTC (permalink / raw)
To: jaegeuk; +Cc: linux-kernel, linux-f2fs-devel
This patch makes the following changes:
- remove unneeded check condition in __cluster_may_compress()
- rename __cluster_may_compress() to cluster_has_invalid_data() for
better readability
- add cp_error check in f2fs_write_compressed_pages() like we did
in f2fs_write_single_data_page()
Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
v2:
- rename function for better readability
- add cp_error check in f2fs_write_compressed_pages()
fs/f2fs/compress.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 6e46a00c1930..53f78befed8f 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-static bool __cluster_may_compress(struct compress_ctx *cc)
+static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
loff_t i_size = i_size_read(cc->inode);
unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
int i;
@@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
- f2fs_bug_on(sbi, !page);
-
- if (unlikely(f2fs_cp_error(sbi)))
- return false;
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
- return false;
+ f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
if (page->index >= nr_pages)
- return false;
+ return true;
}
- return true;
+ return false;
}
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
@@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
return false;
if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
return false;
- return __cluster_may_compress(cc);
+ return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
@@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
+ /* we should bypass data pages to proceed the kworkder jobs */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ goto out_free;
+ }
+
if (IS_NOQUOTA(inode)) {
/*
* We need to wait for node_write to avoid block allocation during
--
2.29.2
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v2] f2fs: compress: remove unneed check condition
2021-04-27 3:07 ` [f2fs-dev] " Chao Yu
@ 2021-05-04 14:42 ` Jaegeuk Kim
-1 siblings, 0 replies; 6+ messages in thread
From: Jaegeuk Kim @ 2021-05-04 14:42 UTC (permalink / raw)
To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, chao
Hi Chao,
I split this into two patches along with upstreamed change.
https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test
Thanks,
On 04/27, Chao Yu wrote:
> This patch changes as below:
> - remove unneeded check condition in __cluster_may_compress()
> - rename __cluster_may_compress() to cluster_has_invalid_data() for
> better readability
> - add cp_error check in f2fs_write_compressed_pages() like we did
> in f2fs_write_single_data_page()
>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
> v2:
> - rename function for better readability
> - add cp_error check in f2fs_write_compressed_pages()
> fs/f2fs/compress.c | 22 +++++++++++-----------
> 1 file changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 6e46a00c1930..53f78befed8f 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
> return is_page_in_cluster(cc, index);
> }
>
> -static bool __cluster_may_compress(struct compress_ctx *cc)
> +static bool cluster_has_invalid_data(struct compress_ctx *cc)
> {
> - struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
> loff_t i_size = i_size_read(cc->inode);
> unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
> int i;
> @@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
> for (i = 0; i < cc->cluster_size; i++) {
> struct page *page = cc->rpages[i];
>
> - f2fs_bug_on(sbi, !page);
> -
> - if (unlikely(f2fs_cp_error(sbi)))
> - return false;
> - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
> - return false;
> + f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
>
> /* beyond EOF */
> if (page->index >= nr_pages)
> - return false;
> + return true;
> }
> - return true;
> + return false;
> }
>
> static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
> @@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
> return false;
> if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
> return false;
> - return __cluster_may_compress(cc);
> + return !cluster_has_invalid_data(cc);
> }
>
> static void set_cluster_writeback(struct compress_ctx *cc)
> @@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> loff_t psize;
> int i, err;
>
> + /* we should bypass data pages to proceed the kworkder jobs */
> + if (unlikely(f2fs_cp_error(sbi))) {
> + mapping_set_error(cc->rpages[0]->mapping, -EIO);
> + goto out_free;
> + }
> +
> if (IS_NOQUOTA(inode)) {
> /*
> * We need to wait for node_write to avoid block allocation during
> --
> 2.29.2
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [f2fs-dev] [PATCH v2] f2fs: compress: remove unneed check condition
@ 2021-05-04 14:42 ` Jaegeuk Kim
0 siblings, 0 replies; 6+ messages in thread
From: Jaegeuk Kim @ 2021-05-04 14:42 UTC (permalink / raw)
To: Chao Yu; +Cc: linux-kernel, linux-f2fs-devel
Hi Chao,
I split this into two patches along with upstreamed change.
https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test
Thanks,
On 04/27, Chao Yu wrote:
> This patch changes as below:
> - remove unneeded check condition in __cluster_may_compress()
> - rename __cluster_may_compress() to cluster_has_invalid_data() for
> better readability
> - add cp_error check in f2fs_write_compressed_pages() like we did
> in f2fs_write_single_data_page()
>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
> v2:
> - rename function for better readability
> - add cp_error check in f2fs_write_compressed_pages()
> fs/f2fs/compress.c | 22 +++++++++++-----------
> 1 file changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 6e46a00c1930..53f78befed8f 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
> return is_page_in_cluster(cc, index);
> }
>
> -static bool __cluster_may_compress(struct compress_ctx *cc)
> +static bool cluster_has_invalid_data(struct compress_ctx *cc)
> {
> - struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
> loff_t i_size = i_size_read(cc->inode);
> unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
> int i;
> @@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
> for (i = 0; i < cc->cluster_size; i++) {
> struct page *page = cc->rpages[i];
>
> - f2fs_bug_on(sbi, !page);
> -
> - if (unlikely(f2fs_cp_error(sbi)))
> - return false;
> - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
> - return false;
> + f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
>
> /* beyond EOF */
> if (page->index >= nr_pages)
> - return false;
> + return true;
> }
> - return true;
> + return false;
> }
>
> static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
> @@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
> return false;
> if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
> return false;
> - return __cluster_may_compress(cc);
> + return !cluster_has_invalid_data(cc);
> }
>
> static void set_cluster_writeback(struct compress_ctx *cc)
> @@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> loff_t psize;
> int i, err;
>
> + /* we should bypass data pages to proceed the kworkder jobs */
> + if (unlikely(f2fs_cp_error(sbi))) {
> + mapping_set_error(cc->rpages[0]->mapping, -EIO);
> + goto out_free;
> + }
> +
> if (IS_NOQUOTA(inode)) {
> /*
> * We need to wait for node_write to avoid block allocation during
> --
> 2.29.2
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v2] f2fs: compress: remove unneed check condition
2021-05-04 14:42 ` [f2fs-dev] " Jaegeuk Kim
@ 2021-05-06 2:15 ` Chao Yu
-1 siblings, 0 replies; 6+ messages in thread
From: Chao Yu @ 2021-05-06 2:15 UTC (permalink / raw)
To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, chao
On 2021/5/4 22:42, Jaegeuk Kim wrote:
> Hi Chao,
>
> I split this into two patches along with upstreamed change.
>
> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test
Jaegeuk, it's better, thanks, :)
Thanks,
>
> Thanks,
>
> On 04/27, Chao Yu wrote:
>> This patch changes as below:
>> - remove unneeded check condition in __cluster_may_compress()
>> - rename __cluster_may_compress() to cluster_has_invalid_data() for
>> better readability
>> - add cp_error check in f2fs_write_compressed_pages() like we did
>> in f2fs_write_single_data_page()
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>> v2:
>> - rename function for better readability
>> - add cp_error check in f2fs_write_compressed_pages()
>> fs/f2fs/compress.c | 22 +++++++++++-----------
>> 1 file changed, 11 insertions(+), 11 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 6e46a00c1930..53f78befed8f 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
>> return is_page_in_cluster(cc, index);
>> }
>>
>> -static bool __cluster_may_compress(struct compress_ctx *cc)
>> +static bool cluster_has_invalid_data(struct compress_ctx *cc)
>> {
>> - struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
>> loff_t i_size = i_size_read(cc->inode);
>> unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
>> int i;
>> @@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
>> for (i = 0; i < cc->cluster_size; i++) {
>> struct page *page = cc->rpages[i];
>>
>> - f2fs_bug_on(sbi, !page);
>> -
>> - if (unlikely(f2fs_cp_error(sbi)))
>> - return false;
>> - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
>> - return false;
>> + f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
>>
>> /* beyond EOF */
>> if (page->index >= nr_pages)
>> - return false;
>> + return true;
>> }
>> - return true;
>> + return false;
>> }
>>
>> static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
>> @@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
>> return false;
>> if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
>> return false;
>> - return __cluster_may_compress(cc);
>> + return !cluster_has_invalid_data(cc);
>> }
>>
>> static void set_cluster_writeback(struct compress_ctx *cc)
>> @@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> loff_t psize;
>> int i, err;
>>
>> + /* we should bypass data pages to proceed the kworkder jobs */
>> + if (unlikely(f2fs_cp_error(sbi))) {
>> + mapping_set_error(cc->rpages[0]->mapping, -EIO);
>> + goto out_free;
>> + }
>> +
>> if (IS_NOQUOTA(inode)) {
>> /*
>> * We need to wait for node_write to avoid block allocation during
>> --
>> 2.29.2
> .
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [f2fs-dev] [PATCH v2] f2fs: compress: remove unneed check condition
@ 2021-05-06 2:15 ` Chao Yu
0 siblings, 0 replies; 6+ messages in thread
From: Chao Yu @ 2021-05-06 2:15 UTC (permalink / raw)
To: Jaegeuk Kim; +Cc: linux-kernel, linux-f2fs-devel
On 2021/5/4 22:42, Jaegeuk Kim wrote:
> Hi Chao,
>
> I split this into two patches along with upstreamed change.
>
> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test
Jaegeuk, it's better, thanks, :)
Thanks,
>
> Thanks,
>
> On 04/27, Chao Yu wrote:
>> This patch changes as below:
>> - remove unneeded check condition in __cluster_may_compress()
>> - rename __cluster_may_compress() to cluster_has_invalid_data() for
>> better readability
>> - add cp_error check in f2fs_write_compressed_pages() like we did
>> in f2fs_write_single_data_page()
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>> v2:
>> - rename function for better readability
>> - add cp_error check in f2fs_write_compressed_pages()
>> fs/f2fs/compress.c | 22 +++++++++++-----------
>> 1 file changed, 11 insertions(+), 11 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 6e46a00c1930..53f78befed8f 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
>> return is_page_in_cluster(cc, index);
>> }
>>
>> -static bool __cluster_may_compress(struct compress_ctx *cc)
>> +static bool cluster_has_invalid_data(struct compress_ctx *cc)
>> {
>> - struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
>> loff_t i_size = i_size_read(cc->inode);
>> unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
>> int i;
>> @@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
>> for (i = 0; i < cc->cluster_size; i++) {
>> struct page *page = cc->rpages[i];
>>
>> - f2fs_bug_on(sbi, !page);
>> -
>> - if (unlikely(f2fs_cp_error(sbi)))
>> - return false;
>> - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
>> - return false;
>> + f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
>>
>> /* beyond EOF */
>> if (page->index >= nr_pages)
>> - return false;
>> + return true;
>> }
>> - return true;
>> + return false;
>> }
>>
>> static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
>> @@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
>> return false;
>> if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
>> return false;
>> - return __cluster_may_compress(cc);
>> + return !cluster_has_invalid_data(cc);
>> }
>>
>> static void set_cluster_writeback(struct compress_ctx *cc)
>> @@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> loff_t psize;
>> int i, err;
>>
>> + /* we should bypass data pages to proceed the kworkder jobs */
>> + if (unlikely(f2fs_cp_error(sbi))) {
>> + mapping_set_error(cc->rpages[0]->mapping, -EIO);
>> + goto out_free;
>> + }
>> +
>> if (IS_NOQUOTA(inode)) {
>> /*
>> * We need to wait for node_write to avoid block allocation during
>> --
>> 2.29.2
> .
>
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2021-05-06 2:16 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-27 3:07 [PATCH v2] f2fs: compress: remove unneed check condition Chao Yu
2021-04-27 3:07 ` [f2fs-dev] " Chao Yu
2021-05-04 14:42 ` Jaegeuk Kim
2021-05-04 14:42 ` [f2fs-dev] " Jaegeuk Kim
2021-05-06 2:15 ` Chao Yu
2021-05-06 2:15 ` [f2fs-dev] " Chao Yu
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.