* [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page
@ 2021-07-22 3:47 Fengnan Chang
2021-07-22 13:53 ` Chao Yu
0 siblings, 1 reply; 5+ messages in thread
From: Fengnan Chang @ 2021-07-22 3:47 UTC (permalink / raw)
To: jaegeuk, chao, linux-f2fs-devel; +Cc: Fengnan Chang
Don't allocate a new page array to replace the old one; just reuse the old
page array, to save one page array allocation and free when writing a
compressed page.
Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
---
fs/f2fs/compress.c | 14 ++------------
fs/f2fs/f2fs.h | 1 +
2 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 455561826c7d..43daafe382e7 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, new_nr_cpages;
- struct page **new_cpages;
u32 chksum = 0;
int i, ret;
@@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+ cc->raw_nr_cpages = cc->nr_cpages;
cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
@@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
- /* Now we're going to cut unnecessary tail pages */
- new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
- if (!new_cpages) {
- ret = -ENOMEM;
- goto out_vunmap_cbuf;
- }
-
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
(new_nr_cpages * PAGE_SIZE) -
@@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < cc->nr_cpages; i++) {
if (i < new_nr_cpages) {
- new_cpages[i] = cc->cpages[i];
continue;
}
f2fs_compress_free_page(cc->cpages[i]);
@@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
- cc->cpages = new_cpages;
cc->nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
@@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc, false);
return 0;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 867f2c5d9559..8b1f84d88a65 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1454,6 +1454,7 @@ struct compress_ctx {
unsigned int nr_rpages; /* total page number in rpages */
struct page **cpages; /* pages store compressed data in cluster */
unsigned int nr_cpages; /* total page number in cpages */
+ unsigned int raw_nr_cpages; /* max total page number in cpages */
void *rbuf; /* virtual mapped address on rpages */
struct compress_data *cbuf; /* virtual mapped address on cpages */
size_t rlen; /* valid data length in rbuf */
--
2.29.0
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page
2021-07-22 3:47 [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page Fengnan Chang
@ 2021-07-22 13:53 ` Chao Yu
2021-07-23 3:52 ` Fengnan Chang
0 siblings, 1 reply; 5+ messages in thread
From: Chao Yu @ 2021-07-22 13:53 UTC (permalink / raw)
To: Fengnan Chang, jaegeuk, linux-f2fs-devel
On 2021/7/22 11:47, Fengnan Chang wrote:
> Don't alloc new page array to replace old, just use old page array, try
> to reduce one page array alloc and free when write compress page.
Nope, see whole story in below link:
https://lore.kernel.org/patchwork/patch/1305096/
Thanks,
>
> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
> ---
> fs/f2fs/compress.c | 14 ++------------
> fs/f2fs/f2fs.h | 1 +
> 2 files changed, 3 insertions(+), 12 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..43daafe382e7 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> const struct f2fs_compress_ops *cops =
> f2fs_cops[fi->i_compress_algorithm];
> unsigned int max_len, new_nr_cpages;
> - struct page **new_cpages;
> u32 chksum = 0;
> int i, ret;
>
> @@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>
> max_len = COMPRESS_HEADER_SIZE + cc->clen;
> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
> + cc->raw_nr_cpages = cc->nr_cpages;
>
> cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
> if (!cc->cpages) {
> @@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>
> new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>
> - /* Now we're going to cut unnecessary tail pages */
> - new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> - if (!new_cpages) {
> - ret = -ENOMEM;
> - goto out_vunmap_cbuf;
> - }
> -
> /* zero out any unused part of the last page */
> memset(&cc->cbuf->cdata[cc->clen], 0,
> (new_nr_cpages * PAGE_SIZE) -
> @@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>
> for (i = 0; i < cc->nr_cpages; i++) {
> if (i < new_nr_cpages) {
> - new_cpages[i] = cc->cpages[i];
> continue;
> }
> f2fs_compress_free_page(cc->cpages[i]);
> @@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> if (cops->destroy_compress_ctx)
> cops->destroy_compress_ctx(cc);
>
> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> - cc->cpages = new_cpages;
> cc->nr_cpages = new_nr_cpages;
>
> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> @@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> spin_unlock(&fi->i_size_lock);
>
> f2fs_put_rpages(cc);
> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> + page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
> cc->cpages = NULL;
> f2fs_destroy_compress_ctx(cc, false);
> return 0;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 867f2c5d9559..8b1f84d88a65 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1454,6 +1454,7 @@ struct compress_ctx {
> unsigned int nr_rpages; /* total page number in rpages */
> struct page **cpages; /* pages store compressed data in cluster */
> unsigned int nr_cpages; /* total page number in cpages */
> + unsigned int raw_nr_cpages; /* max total page number in cpages */
> void *rbuf; /* virtual mapped address on rpages */
> struct compress_data *cbuf; /* virtual mapped address on cpages */
> size_t rlen; /* valid data length in rbuf */
>
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page
2021-07-22 13:53 ` Chao Yu
@ 2021-07-23 3:52 ` Fengnan Chang
2021-07-23 5:26 ` Chao Yu
0 siblings, 1 reply; 5+ messages in thread
From: Fengnan Chang @ 2021-07-23 3:52 UTC (permalink / raw)
To: Chao Yu, jaegeuk, linux-f2fs-devel
Sorry, I didn't get your point. In my opinion, new_nr_cpages should
always be less than nr_cpages — is this right? So we can just reuse cpages
and don't need to allocate a new one.
Thanks.
On 2021/7/22 21:53, Chao Yu wrote:
> On 2021/7/22 11:47, Fengnan Chang wrote:
>> Don't alloc new page array to replace old, just use old page array, try
>> to reduce one page array alloc and free when write compress page.
>
> Nope, see whole story in below link:
>
> https://lore.kernel.org/patchwork/patch/1305096/
>
> Thanks,
>
>>
>> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
>> ---
>> fs/f2fs/compress.c | 14 ++------------
>> fs/f2fs/f2fs.h | 1 +
>> 2 files changed, 3 insertions(+), 12 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 455561826c7d..43daafe382e7 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx
>> *cc)
>> const struct f2fs_compress_ops *cops =
>> f2fs_cops[fi->i_compress_algorithm];
>> unsigned int max_len, new_nr_cpages;
>> - struct page **new_cpages;
>> u32 chksum = 0;
>> int i, ret;
>> @@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx
>> *cc)
>> max_len = COMPRESS_HEADER_SIZE + cc->clen;
>> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>> + cc->raw_nr_cpages = cc->nr_cpages;
>> cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>> if (!cc->cpages) {
>> @@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct
>> compress_ctx *cc)
>> new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE,
>> PAGE_SIZE);
>> - /* Now we're going to cut unnecessary tail pages */
>> - new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>> - if (!new_cpages) {
>> - ret = -ENOMEM;
>> - goto out_vunmap_cbuf;
>> - }
>> -
>> /* zero out any unused part of the last page */
>> memset(&cc->cbuf->cdata[cc->clen], 0,
>> (new_nr_cpages * PAGE_SIZE) -
>> @@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx
>> *cc)
>> for (i = 0; i < cc->nr_cpages; i++) {
>> if (i < new_nr_cpages) {
>> - new_cpages[i] = cc->cpages[i];
>> continue;
>> }
>> f2fs_compress_free_page(cc->cpages[i]);
>> @@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx
>> *cc)
>> if (cops->destroy_compress_ctx)
>> cops->destroy_compress_ctx(cc);
>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> - cc->cpages = new_cpages;
>> cc->nr_cpages = new_nr_cpages;
>> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>> @@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct
>> compress_ctx *cc,
>> spin_unlock(&fi->i_size_lock);
>> f2fs_put_rpages(cc);
>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> + page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>> cc->cpages = NULL;
>> f2fs_destroy_compress_ctx(cc, false);
>> return 0;
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index 867f2c5d9559..8b1f84d88a65 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -1454,6 +1454,7 @@ struct compress_ctx {
>> unsigned int nr_rpages; /* total page number in rpages */
>> struct page **cpages; /* pages store compressed data in
>> cluster */
>> unsigned int nr_cpages; /* total page number in cpages */
>> + unsigned int raw_nr_cpages; /* max total page number in cpages */
>> void *rbuf; /* virtual mapped address on rpages */
>> struct compress_data *cbuf; /* virtual mapped address on
>> cpages */
>> size_t rlen; /* valid data length in rbuf */
>>
>
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page
2021-07-23 3:52 ` Fengnan Chang
@ 2021-07-23 5:26 ` Chao Yu
2021-07-23 8:20 ` Fengnan Chang
0 siblings, 1 reply; 5+ messages in thread
From: Chao Yu @ 2021-07-23 5:26 UTC (permalink / raw)
To: Fengnan Chang, jaegeuk, linux-f2fs-devel
On 2021/7/23 11:52, Fengnan Chang wrote:
> Sorry, I didn't get your point, in my opinion, new_nr_cpages should
> always little than nr_cpages, is this right? So we can just use cpages,
> don't need to alloc new one.
>
> Thanks.
>
>
> On 2021/7/22 21:53, Chao Yu wrote:
>> On 2021/7/22 11:47, Fengnan Chang wrote:
>>> Don't alloc new page array to replace old, just use old page array, try
>>> to reduce one page array alloc and free when write compress page.
>>
>> Nope, see whole story in below link:
>>
>> https://lore.kernel.org/patchwork/patch/1305096/
>>
>> Thanks,
>>
>>>
>>> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
>>> ---
>>> fs/f2fs/compress.c | 14 ++------------
>>> fs/f2fs/f2fs.h | 1 +
>>> 2 files changed, 3 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>> index 455561826c7d..43daafe382e7 100644
>>> --- a/fs/f2fs/compress.c
>>> +++ b/fs/f2fs/compress.c
>>> @@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>> *cc)
>>> const struct f2fs_compress_ops *cops =
>>> f2fs_cops[fi->i_compress_algorithm];
>>> unsigned int max_len, new_nr_cpages;
>>> - struct page **new_cpages;
>>> u32 chksum = 0;
>>> int i, ret;
>>> @@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx
>>> *cc)
>>> max_len = COMPRESS_HEADER_SIZE + cc->clen;
>>> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>>> + cc->raw_nr_cpages = cc->nr_cpages;
If raw_nr_cpages is used to store the original nr_cpages, we need to call
page_array_free(, cc->cpages, cc->raw_nr_cpages) in all places; can you
please check whether there are any missed cases?
Thanks,
>>> cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>>> if (!cc->cpages) {
>>> @@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct
>>> compress_ctx *cc)
>>> new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE,
>>> PAGE_SIZE);
>>> - /* Now we're going to cut unnecessary tail pages */
>>> - new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>>> - if (!new_cpages) {
>>> - ret = -ENOMEM;
>>> - goto out_vunmap_cbuf;
>>> - }
>>> -
>>> /* zero out any unused part of the last page */
>>> memset(&cc->cbuf->cdata[cc->clen], 0,
>>> (new_nr_cpages * PAGE_SIZE) -
>>> @@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>> *cc)
>>> for (i = 0; i < cc->nr_cpages; i++) {
>>> if (i < new_nr_cpages) {
>>> - new_cpages[i] = cc->cpages[i];
>>> continue;
>>> }
>>> f2fs_compress_free_page(cc->cpages[i]);
>>> @@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>> *cc)
>>> if (cops->destroy_compress_ctx)
>>> cops->destroy_compress_ctx(cc);
>>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>> - cc->cpages = new_cpages;
>>> cc->nr_cpages = new_nr_cpages;
>>> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>>> @@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct
>>> compress_ctx *cc,
>>> spin_unlock(&fi->i_size_lock);
>>> f2fs_put_rpages(cc);
>>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>> + page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>>> cc->cpages = NULL;
>>> f2fs_destroy_compress_ctx(cc, false);
>>> return 0;
>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>> index 867f2c5d9559..8b1f84d88a65 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -1454,6 +1454,7 @@ struct compress_ctx {
>>> unsigned int nr_rpages; /* total page number in rpages */
>>> struct page **cpages; /* pages store compressed data in
>>> cluster */
>>> unsigned int nr_cpages; /* total page number in cpages */
>>> + unsigned int raw_nr_cpages; /* max total page number in cpages */
>>> void *rbuf; /* virtual mapped address on rpages */
>>> struct compress_data *cbuf; /* virtual mapped address on
>>> cpages */
>>> size_t rlen; /* valid data length in rbuf */
>>>
>>
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page
2021-07-23 5:26 ` Chao Yu
@ 2021-07-23 8:20 ` Fengnan Chang
0 siblings, 0 replies; 5+ messages in thread
From: Fengnan Chang @ 2021-07-23 8:20 UTC (permalink / raw)
To: Chao Yu, jaegeuk, linux-f2fs-devel
OK, it seems there is one place that was missed.
Thanks.
On 2021/7/23 13:26, Chao Yu wrote:
> On 2021/7/23 11:52, Fengnan Chang wrote:
>> Sorry, I didn't get your point, in my opinion, new_nr_cpages should
>> always little than nr_cpages, is this right? So we can just use cpages,
>> don't need to alloc new one.
>>
>> Thanks.
>>
>>
>> On 2021/7/22 21:53, Chao Yu wrote:
>>> On 2021/7/22 11:47, Fengnan Chang wrote:
>>>> Don't alloc new page array to replace old, just use old page array, try
>>>> to reduce one page array alloc and free when write compress page.
>>>
>>> Nope, see whole story in below link:
>>>
>>> https://lore.kernel.org/patchwork/patch/1305096/
>>>
>>> Thanks,
>>>
>>>>
>>>> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
>>>> ---
>>>> fs/f2fs/compress.c | 14 ++------------
>>>> fs/f2fs/f2fs.h | 1 +
>>>> 2 files changed, 3 insertions(+), 12 deletions(-)
>>>>
>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>> index 455561826c7d..43daafe382e7 100644
>>>> --- a/fs/f2fs/compress.c
>>>> +++ b/fs/f2fs/compress.c
>>>> @@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>>> *cc)
>>>> const struct f2fs_compress_ops *cops =
>>>> f2fs_cops[fi->i_compress_algorithm];
>>>> unsigned int max_len, new_nr_cpages;
>>>> - struct page **new_cpages;
>>>> u32 chksum = 0;
>>>> int i, ret;
>>>> @@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx
>>>> *cc)
>>>> max_len = COMPRESS_HEADER_SIZE + cc->clen;
>>>> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>>>> + cc->raw_nr_cpages = cc->nr_cpages;
>
> If raw_nr_cpags is used to store original nr_cpages, we need to call
> page_array_free(, cc->cpages, cc->raw_nr_cpages) in all places, can you
> please check whether this are any missed cases?
>
> Thanks,
>
>>>> cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>>>> if (!cc->cpages) {
>>>> @@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct
>>>> compress_ctx *cc)
>>>> new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE,
>>>> PAGE_SIZE);
>>>> - /* Now we're going to cut unnecessary tail pages */
>>>> - new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>>>> - if (!new_cpages) {
>>>> - ret = -ENOMEM;
>>>> - goto out_vunmap_cbuf;
>>>> - }
>>>> -
>>>> /* zero out any unused part of the last page */
>>>> memset(&cc->cbuf->cdata[cc->clen], 0,
>>>> (new_nr_cpages * PAGE_SIZE) -
>>>> @@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>>> *cc)
>>>> for (i = 0; i < cc->nr_cpages; i++) {
>>>> if (i < new_nr_cpages) {
>>>> - new_cpages[i] = cc->cpages[i];
>>>> continue;
>>>> }
>>>> f2fs_compress_free_page(cc->cpages[i]);
>>>> @@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx
>>>> *cc)
>>>> if (cops->destroy_compress_ctx)
>>>> cops->destroy_compress_ctx(cc);
>>>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> - cc->cpages = new_cpages;
>>>> cc->nr_cpages = new_nr_cpages;
>>>> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>>>> @@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct
>>>> compress_ctx *cc,
>>>> spin_unlock(&fi->i_size_lock);
>>>> f2fs_put_rpages(cc);
>>>> - page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> + page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>>>> cc->cpages = NULL;
>>>> f2fs_destroy_compress_ctx(cc, false);
>>>> return 0;
>>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>>> index 867f2c5d9559..8b1f84d88a65 100644
>>>> --- a/fs/f2fs/f2fs.h
>>>> +++ b/fs/f2fs/f2fs.h
>>>> @@ -1454,6 +1454,7 @@ struct compress_ctx {
>>>> unsigned int nr_rpages; /* total page number in rpages */
>>>> struct page **cpages; /* pages store compressed data in
>>>> cluster */
>>>> unsigned int nr_cpages; /* total page number in cpages */
>>>> + unsigned int raw_nr_cpages; /* max total page number in
>>>> cpages */
>>>> void *rbuf; /* virtual mapped address on rpages */
>>>> struct compress_data *cbuf; /* virtual mapped address on
>>>> cpages */
>>>> size_t rlen; /* valid data length in rbuf */
>>>>
>>>
>
_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2021-07-23 8:25 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-22 3:47 [f2fs-dev] [RFC PATCH] f2fs: compress: reduce one page array alloc and free when write compressed page Fengnan Chang
2021-07-22 13:53 ` Chao Yu
2021-07-23 3:52 ` Fengnan Chang
2021-07-23 5:26 ` Chao Yu
2021-07-23 8:20 ` Fengnan Chang
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.