* [f2fs-dev] [PATCH v1] f2fs: compress: reduce one page array alloc and free when write compressed page
@ 2021-07-23  8:37 Fengnan Chang
  2021-10-25  9:23 ` Fengnan Chang
  2021-11-08 15:18 ` Chao Yu
  0 siblings, 2 replies; 3+ messages in thread
From: Fengnan Chang @ 2021-07-23  8:37 UTC (permalink / raw)
  To: chao, jaegeuk, linux-f2fs-devel; +Cc: Fengnan Chang

Don't allocate a new page array to replace the old one; reuse the old page
array instead. This saves one page array allocation and free on every
compressed page write.
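
A condensed sketch of the intended flow (illustration only; error paths and
unrelated code are omitted, see the hunks below for the real change):

	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
	cc->raw_nr_cpages = cc->nr_cpages;	/* remember the allocation size */

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);

	/* ... compress into cc->cbuf, producing cc->clen bytes ... */

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* free only the unneeded tail pages, keep the original array */
	for (i = new_nr_cpages; i < cc->nr_cpages; i++)
		f2fs_compress_free_page(cc->cpages[i]);
	cc->nr_cpages = new_nr_cpages;

	/* callers free the array with its allocation size */
	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);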

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
---
 fs/f2fs/compress.c | 18 ++++--------------
 fs/f2fs/f2fs.h     |  1 +
 2 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 455561826c7d..1395b9cec7f4 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	const struct f2fs_compress_ops *cops =
 				f2fs_cops[fi->i_compress_algorithm];
 	unsigned int max_len, new_nr_cpages;
-	struct page **new_cpages;
 	u32 chksum = 0;
 	int i, ret;
 
@@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+	cc->raw_nr_cpages = cc->nr_cpages;
 
 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 	if (!cc->cpages) {
@@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 
-	/* Now we're going to cut unnecessary tail pages */
-	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
-	if (!new_cpages) {
-		ret = -ENOMEM;
-		goto out_vunmap_cbuf;
-	}
-
 	/* zero out any unused part of the last page */
 	memset(&cc->cbuf->cdata[cc->clen], 0,
 			(new_nr_cpages * PAGE_SIZE) -
@@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	for (i = 0; i < cc->nr_cpages; i++) {
 		if (i < new_nr_cpages) {
-			new_cpages[i] = cc->cpages[i];
 			continue;
 		}
 		f2fs_compress_free_page(cc->cpages[i]);
@@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	if (cops->destroy_compress_ctx)
 		cops->destroy_compress_ctx(cc);
 
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
-	cc->cpages = new_cpages;
 	cc->nr_cpages = new_nr_cpages;
 
 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
@@ -727,7 +717,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 		if (cc->cpages[i])
 			f2fs_compress_free_page(cc->cpages[i]);
 	}
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
 	cc->cpages = NULL;
 destroy_compress_ctx:
 	if (cops->destroy_compress_ctx)
@@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	spin_unlock(&fi->i_size_lock);
 
 	f2fs_put_rpages(cc);
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
 	cc->cpages = NULL;
 	f2fs_destroy_compress_ctx(cc, false);
 	return 0;
@@ -1356,7 +1346,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	else
 		f2fs_unlock_op(sbi);
 out_free:
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
 	cc->cpages = NULL;
 	return -EAGAIN;
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 867f2c5d9559..8b1f84d88a65 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1454,6 +1454,7 @@ struct compress_ctx {
 	unsigned int nr_rpages;		/* total page number in rpages */
 	struct page **cpages;		/* pages store compressed data in cluster */
 	unsigned int nr_cpages;		/* total page number in cpages */
+	unsigned int raw_nr_cpages;	/* max total page number in cpages */
 	void *rbuf;			/* virtual mapped address on rpages */
 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
 	size_t rlen;			/* valid data length in rbuf */
-- 
2.29.0




* Re: [f2fs-dev] [PATCH v1] f2fs: compress: reduce one page array alloc and free when write compressed page
  2021-07-23  8:37 [f2fs-dev] [PATCH v1] f2fs: compress: reduce one page array alloc and free when write compressed page Fengnan Chang
@ 2021-10-25  9:23 ` Fengnan Chang
  2021-11-08 15:18 ` Chao Yu
  1 sibling, 0 replies; 3+ messages in thread
From: Fengnan Chang @ 2021-10-25  9:23 UTC (permalink / raw)
  To: Fengnan Chang, chao, jaegeuk, linux-f2fs-devel

It seems this has been forgotten...


* Re: [f2fs-dev] [PATCH v1] f2fs: compress: reduce one page array alloc and free when write compressed page
  2021-07-23  8:37 [f2fs-dev] [PATCH v1] f2fs: compress: reduce one page array alloc and free when write compressed page Fengnan Chang
  2021-10-25  9:23 ` Fengnan Chang
@ 2021-11-08 15:18 ` Chao Yu
  1 sibling, 0 replies; 3+ messages in thread
From: Chao Yu @ 2021-11-08 15:18 UTC (permalink / raw)
  To: Fengnan Chang, jaegeuk, linux-f2fs-devel

On 2021/7/23 16:37, Fengnan Chang wrote:
> Don't allocate a new page array to replace the old one; reuse the old page
> array instead. This saves one page array allocation and free on every
> compressed page write.
> 
> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
> ---
>   fs/f2fs/compress.c | 18 ++++--------------
>   fs/f2fs/f2fs.h     |  1 +
>   2 files changed, 5 insertions(+), 14 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 455561826c7d..1395b9cec7f4 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   	const struct f2fs_compress_ops *cops =
>   				f2fs_cops[fi->i_compress_algorithm];
>   	unsigned int max_len, new_nr_cpages;
> -	struct page **new_cpages;
>   	u32 chksum = 0;
>   	int i, ret;
>   
> @@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	max_len = COMPRESS_HEADER_SIZE + cc->clen;
>   	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
> +	cc->raw_nr_cpages = cc->nr_cpages;
>   
>   	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>   	if (!cc->cpages) {
> @@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>   
> -	/* Now we're going to cut unnecessary tail pages */
> -	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> -	if (!new_cpages) {
> -		ret = -ENOMEM;
> -		goto out_vunmap_cbuf;
> -	}
> -
>   	/* zero out any unused part of the last page */
>   	memset(&cc->cbuf->cdata[cc->clen], 0,
>   			(new_nr_cpages * PAGE_SIZE) -
> @@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	for (i = 0; i < cc->nr_cpages; i++) {
>   		if (i < new_nr_cpages) {
> -			new_cpages[i] = cc->cpages[i];
>   			continue;
>   		}
>   		f2fs_compress_free_page(cc->cpages[i]);
> @@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   	if (cops->destroy_compress_ctx)
>   		cops->destroy_compress_ctx(cc);
>   
> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> -	cc->cpages = new_cpages;
>   	cc->nr_cpages = new_nr_cpages;
>   
>   	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> @@ -727,7 +717,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   		if (cc->cpages[i])
>   			f2fs_compress_free_page(cc->cpages[i]);
>   	}
> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> +	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>   	cc->cpages = NULL;
>   destroy_compress_ctx:
>   	if (cops->destroy_compress_ctx)
> @@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   	spin_unlock(&fi->i_size_lock);
>   
>   	f2fs_put_rpages(cc);
> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> +	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>   	cc->cpages = NULL;
>   	f2fs_destroy_compress_ctx(cc, false);
>   	return 0;
> @@ -1356,7 +1346,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   	else
>   		f2fs_unlock_op(sbi);
>   out_free:
> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> +	page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
>   	cc->cpages = NULL;
>   	return -EAGAIN;
>   }
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 867f2c5d9559..8b1f84d88a65 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1454,6 +1454,7 @@ struct compress_ctx {
>   	unsigned int nr_rpages;		/* total page number in rpages */
>   	struct page **cpages;		/* pages store compressed data in cluster */
>   	unsigned int nr_cpages;		/* total page number in cpages */
> +	unsigned int raw_nr_cpages;	/* max total page number in cpages */

Sorry for the long delay. I guess it's worth taking this patch to simplify the
compress flow...

One more concern about code readability:

How about keeping nr_{cpages,rpages} to indicate the max number of page pointers
in the array, and introducing valid_nr_cpages to indicate the number of valid
page pointers in the array?

Thoughts?
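
Roughly something like below, just to show the naming I mean (illustration
only, untested; the field comments are my guess):

	struct compress_ctx {
		/* ... */
		struct page **cpages;		/* pages store compressed data in cluster */
		unsigned int nr_cpages;		/* max number of page pointers in cpages */
		unsigned int valid_nr_cpages;	/* number of cpages holding valid data */
		/* ... */
	};

	/* alloc/free always use the fixed array size */
	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	/* ... */
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);

	/* only the valid count shrinks after compression */
	cc->valid_nr_cpages = new_nr_cpages;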

Thanks,

>   	void *rbuf;			/* virtual mapped address on rpages */
>   	struct compress_data *cbuf;	/* virtual mapped address on cpages */
>   	size_t rlen;			/* valid data length in rbuf */
> 

