* [f2fs-dev] [PATCH v2] f2fs: compress: reduce one page array alloc and free when write compressed page
@ 2021-11-09  3:54 Fengnan Chang
  2021-11-09 14:10 ` Chao Yu
       [not found] ` <ALIA-QAiE0UFErjRNaMLZaqL.9.1636467042061.Hmail.changfengnan@vivo.com>
  0 siblings, 2 replies; 4+ messages in thread
From: Fengnan Chang @ 2021-11-09  3:54 UTC (permalink / raw)
  To: jaegeuk, chao; +Cc: Fengnan Chang, linux-f2fs-devel

Don't allocate a new page pointer array to replace the old one; reuse the
old array and introduce valid_nr_cpages to track how many page pointers in
it are valid. This saves one page array allocation and free when writing a
compressed page.
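
In outline, the tail trimming in f2fs_compress_pages() then looks like the
following condensed sketch (not a verbatim hunk; clearing the freed slots is
assumed to stay as in the existing loop):

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* free only the tail pages the compressed data no longer needs */
	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;	/* assumed, as in the current loop */
	}

	/* cc->cpages itself is kept; only the count of valid entries changes */
	cc->valid_nr_cpages = new_nr_cpages;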

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
---
 fs/f2fs/compress.c | 27 +++++++++------------------
 fs/f2fs/data.c     |  1 +
 fs/f2fs/f2fs.h     |  1 +
 3 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 9b663eaf4805..28785dd78c6f 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -153,6 +153,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 	cc->rpages = NULL;
 	cc->nr_rpages = 0;
 	cc->nr_cpages = 0;
+	cc->valid_nr_cpages = 0;
 	if (!reuse)
 		cc->cluster_idx = NULL_CLUSTER;
 }
@@ -619,7 +620,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	const struct f2fs_compress_ops *cops =
 				f2fs_cops[fi->i_compress_algorithm];
 	unsigned int max_len, new_nr_cpages;
-	struct page **new_cpages;
 	u32 chksum = 0;
 	int i, ret;
 
@@ -634,6 +634,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+	cc->valid_nr_cpages = cc->nr_cpages;
 
 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 	if (!cc->cpages) {
@@ -684,13 +685,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 
-	/* Now we're going to cut unnecessary tail pages */
-	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
-	if (!new_cpages) {
-		ret = -ENOMEM;
-		goto out_vunmap_cbuf;
-	}
-
 	/* zero out any unused part of the last page */
 	memset(&cc->cbuf->cdata[cc->clen], 0,
 			(new_nr_cpages * PAGE_SIZE) -
@@ -701,7 +695,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	for (i = 0; i < cc->nr_cpages; i++) {
 		if (i < new_nr_cpages) {
-			new_cpages[i] = cc->cpages[i];
 			continue;
 		}
 		f2fs_compress_free_page(cc->cpages[i]);
@@ -711,9 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	if (cops->destroy_compress_ctx)
 		cops->destroy_compress_ctx(cc);
 
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
-	cc->cpages = new_cpages;
-	cc->nr_cpages = new_nr_cpages;
+	cc->valid_nr_cpages = new_nr_cpages;
 
 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 							cc->clen, ret);
@@ -1288,14 +1279,14 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	cic->inode = inode;
-	atomic_set(&cic->pending_pages, cc->nr_cpages);
+	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 	if (!cic->rpages)
 		goto out_put_cic;
 
 	cic->nr_rpages = cc->cluster_size;
 
-	for (i = 0; i < cc->nr_cpages; i++) {
+	for (i = 0; i < cc->valid_nr_cpages; i++) {
 		f2fs_set_compressed_page(cc->cpages[i], inode,
 					cc->rpages[i + 1]->index, cic);
 		fio.compressed_page = cc->cpages[i];
@@ -1340,7 +1331,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
 			fio.compr_blocks++;
 
-		if (i > cc->nr_cpages) {
+		if (i > cc->valid_nr_cpages) {
 			if (__is_valid_data_blkaddr(blkaddr)) {
 				f2fs_invalidate_blocks(sbi, blkaddr);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -1365,8 +1356,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 	if (fio.compr_blocks)
 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
-	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
-	add_compr_block_stat(inode, cc->nr_cpages);
+	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
+	add_compr_block_stat(inode, cc->valid_nr_cpages);
 
 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
 	if (cc->cluster_idx == 0)
@@ -1404,7 +1395,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	else
 		f2fs_unlock_op(sbi);
 out_free:
-	for (i = 0; i < cc->nr_cpages; i++) {
+	for (i = 0; i < cc->valid_nr_cpages; i++) {
 		if (!cc->cpages[i])
 			continue;
 		f2fs_compress_free_page(cc->cpages[i]);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f4fd6c246c9a..4ddc0ba0f2c0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2934,6 +2934,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 		.rpages = NULL,
 		.nr_rpages = 0,
 		.cpages = NULL,
+		.valid_nr_cpages = 0,
 		.rbuf = NULL,
 		.cbuf = NULL,
 		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 039a229e11c9..acb587f054db 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1481,6 +1481,7 @@ struct compress_ctx {
 	unsigned int nr_rpages;		/* total page number in rpages */
 	struct page **cpages;		/* pages store compressed data in cluster */
 	unsigned int nr_cpages;		/* total page number in cpages */
+	unsigned int valid_nr_cpages;	/* valid page number in cpages */
 	void *rbuf;			/* virtual mapped address on rpages */
 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
 	size_t rlen;			/* valid data length in rbuf */
-- 
2.32.0




* Re: [f2fs-dev] [PATCH v2] f2fs: compress: reduce one page array alloc and free when write compressed page
  2021-11-09  3:54 [f2fs-dev] [PATCH v2] f2fs: compress: reduce one page array alloc and free when write compressed page Fengnan Chang
@ 2021-11-09 14:10 ` Chao Yu
       [not found] ` <ALIA-QAiE0UFErjRNaMLZaqL.9.1636467042061.Hmail.changfengnan@vivo.com>
  1 sibling, 0 replies; 4+ messages in thread
From: Chao Yu @ 2021-11-09 14:10 UTC (permalink / raw)
  To: Fengnan Chang, jaegeuk; +Cc: linux-f2fs-devel

On 2021/11/9 11:54, Fengnan Chang wrote:
> Don't alloc new page pointers array to replace old, just use old, introduce
> valid_nr_cpages to indicate valid number of page pointers in array, try to
> reduce one page array alloc and free when write compress page.
> 
> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
> ---
>   fs/f2fs/compress.c | 27 +++++++++------------------
>   fs/f2fs/data.c     |  1 +
>   fs/f2fs/f2fs.h     |  1 +
>   3 files changed, 11 insertions(+), 18 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 9b663eaf4805..28785dd78c6f 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -153,6 +153,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
>   	cc->rpages = NULL;
>   	cc->nr_rpages = 0;
>   	cc->nr_cpages = 0;
> +	cc->valid_nr_cpages = 0;
>   	if (!reuse)
>   		cc->cluster_idx = NULL_CLUSTER;
>   }
> @@ -619,7 +620,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   	const struct f2fs_compress_ops *cops =
>   				f2fs_cops[fi->i_compress_algorithm];
>   	unsigned int max_len, new_nr_cpages;
> -	struct page **new_cpages;
>   	u32 chksum = 0;
>   	int i, ret;
>   
> @@ -634,6 +634,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	max_len = COMPRESS_HEADER_SIZE + cc->clen;
>   	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
> +	cc->valid_nr_cpages = cc->nr_cpages;
>   
>   	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>   	if (!cc->cpages) {
> @@ -684,13 +685,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>   
> -	/* Now we're going to cut unnecessary tail pages */
> -	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> -	if (!new_cpages) {
> -		ret = -ENOMEM;
> -		goto out_vunmap_cbuf;

This removes the last user of the out_vunmap_cbuf label, so we can remove
the dead code below in the error path as well.

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);

> -	}
> -
>   	/* zero out any unused part of the last page */
>   	memset(&cc->cbuf->cdata[cc->clen], 0,
>   			(new_nr_cpages * PAGE_SIZE) -
> @@ -701,7 +695,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   
>   	for (i = 0; i < cc->nr_cpages; i++) {
>   		if (i < new_nr_cpages) {
> -			new_cpages[i] = cc->cpages[i];
>   			continue;
>   		}

if (i < new_nr_cpages)
	continue;

>   		f2fs_compress_free_page(cc->cpages[i]);
> @@ -711,9 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>   	if (cops->destroy_compress_ctx)
>   		cops->destroy_compress_ctx(cc);
>   
> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> -	cc->cpages = new_cpages;
> -	cc->nr_cpages = new_nr_cpages;
> +	cc->valid_nr_cpages = new_nr_cpages;
>   
>   	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>   							cc->clen, ret);
> @@ -1288,14 +1279,14 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   
>   	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
>   	cic->inode = inode;
> -	atomic_set(&cic->pending_pages, cc->nr_cpages);
> +	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
>   	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>   	if (!cic->rpages)
>   		goto out_put_cic;
>   
>   	cic->nr_rpages = cc->cluster_size;
>   
> -	for (i = 0; i < cc->nr_cpages; i++) {
> +	for (i = 0; i < cc->valid_nr_cpages; i++) {
>   		f2fs_set_compressed_page(cc->cpages[i], inode,
>   					cc->rpages[i + 1]->index, cic);
>   		fio.compressed_page = cc->cpages[i];
> @@ -1340,7 +1331,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
>   			fio.compr_blocks++;
>   
> -		if (i > cc->nr_cpages) {
> +		if (i > cc->valid_nr_cpages) {
>   			if (__is_valid_data_blkaddr(blkaddr)) {
>   				f2fs_invalidate_blocks(sbi, blkaddr);
>   				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
> @@ -1365,8 +1356,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   
>   	if (fio.compr_blocks)
>   		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
> -	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
> -	add_compr_block_stat(inode, cc->nr_cpages);
> +	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
> +	add_compr_block_stat(inode, cc->valid_nr_cpages);
>   
>   	set_inode_flag(cc->inode, FI_APPEND_WRITE);
>   	if (cc->cluster_idx == 0)
> @@ -1404,7 +1395,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>   	else
>   		f2fs_unlock_op(sbi);
>   out_free:
> -	for (i = 0; i < cc->nr_cpages; i++) {
> +	for (i = 0; i < cc->valid_nr_cpages; i++) {
>   		if (!cc->cpages[i])
>   			continue;

Can we remove the above cpages check?

Thanks,

>   		f2fs_compress_free_page(cc->cpages[i]);
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index f4fd6c246c9a..4ddc0ba0f2c0 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2934,6 +2934,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>   		.rpages = NULL,
>   		.nr_rpages = 0,
>   		.cpages = NULL,
> +		.valid_nr_cpages = 0,
>   		.rbuf = NULL,
>   		.cbuf = NULL,
>   		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 039a229e11c9..acb587f054db 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1481,6 +1481,7 @@ struct compress_ctx {
>   	unsigned int nr_rpages;		/* total page number in rpages */
>   	struct page **cpages;		/* pages store compressed data in cluster */
>   	unsigned int nr_cpages;		/* total page number in cpages */
> +	unsigned int valid_nr_cpages;	/* valid page number in cpages */
>   	void *rbuf;			/* virtual mapped address on rpages */
>   	struct compress_data *cbuf;	/* virtual mapped address on cpages */
>   	size_t rlen;			/* valid data length in rbuf */
> 



* Re: [f2fs-dev] [PATCH v2] f2fs: compress: reduce one page array alloc and free when write compressed page
       [not found] ` <ALIA-QAiE0UFErjRNaMLZaqL.9.1636467042061.Hmail.changfengnan@vivo.com>
@ 2021-11-10  2:34   ` 常凤楠
  2021-11-10  2:39     ` Chao Yu
  0 siblings, 1 reply; 4+ messages in thread
From: 常凤楠 @ 2021-11-10  2:34 UTC (permalink / raw)
  To: Chao Yu, jaegeuk; +Cc: linux-f2fs-devel



> -----Original Message-----
> From: changfengnan@vivo.com <changfengnan@vivo.com> On Behalf Of
> Chao Yu
> Sent: Tuesday, November 9, 2021 10:11 PM
> To: 常凤楠 <changfengnan@vivo.com>; jaegeuk@kernel.org
> Cc: linux-f2fs-devel@lists.sourceforge.net
> Subject: Re: [PATCH v2] f2fs: compress: reduce one page array alloc and
> free when write compressed page
> 
> On 2021/11/9 11:54, Fengnan Chang wrote:
> > Don't alloc new page pointers array to replace old, just use old,
> > introduce valid_nr_cpages to indicate valid number of page pointers in
> > array, try to reduce one page array alloc and free when write compress
> page.
> >
> > Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
> > ---
> >   fs/f2fs/compress.c | 27 +++++++++------------------
> >   fs/f2fs/data.c     |  1 +
> >   fs/f2fs/f2fs.h     |  1 +
> >   3 files changed, 11 insertions(+), 18 deletions(-)
> >
> > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index
> > 9b663eaf4805..28785dd78c6f 100644
> > --- a/fs/f2fs/compress.c
> > +++ b/fs/f2fs/compress.c
> > @@ -153,6 +153,7 @@ void f2fs_destroy_compress_ctx(struct
> compress_ctx *cc, bool reuse)
> >   	cc->rpages = NULL;
> >   	cc->nr_rpages = 0;
> >   	cc->nr_cpages = 0;
> > +	cc->valid_nr_cpages = 0;
> >   	if (!reuse)
> >   		cc->cluster_idx = NULL_CLUSTER;
> >   }
> > @@ -619,7 +620,6 @@ static int f2fs_compress_pages(struct
> compress_ctx *cc)
> >   	const struct f2fs_compress_ops *cops =
> >   				f2fs_cops[fi->i_compress_algorithm];
> >   	unsigned int max_len, new_nr_cpages;
> > -	struct page **new_cpages;
> >   	u32 chksum = 0;
> >   	int i, ret;
> >
> > @@ -634,6 +634,7 @@ static int f2fs_compress_pages(struct
> compress_ctx
> > *cc)
> >
> >   	max_len = COMPRESS_HEADER_SIZE + cc->clen;
> >   	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
> > +	cc->valid_nr_cpages = cc->nr_cpages;
> >
> >   	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
> >   	if (!cc->cpages) {
> > @@ -684,13 +685,6 @@ static int f2fs_compress_pages(struct
> > compress_ctx *cc)
> >
> >   	new_nr_cpages = DIV_ROUND_UP(cc->clen +
> COMPRESS_HEADER_SIZE,
> > PAGE_SIZE);
> >
> > -	/* Now we're going to cut unnecessary tail pages */
> > -	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> > -	if (!new_cpages) {
> > -		ret = -ENOMEM;
> > -		goto out_vunmap_cbuf;
> 
> It removes last user of out_vunmap_cbuf label, so we can remove below
> dead codes in error path as well.
> 
> out_vunmap_cbuf:
> 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);

Not right, there is still other code that uses this label.
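
For example, the error path of cops->compress_pages() still jumps there
(roughly, based on the surrounding code in f2fs_compress_pages(), not shown
in this hunk):

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;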

Thanks.
> 
> > -	}
> > -
> >   	/* zero out any unused part of the last page */
> >   	memset(&cc->cbuf->cdata[cc->clen], 0,
> >   			(new_nr_cpages * PAGE_SIZE) -
> > @@ -701,7 +695,6 @@ static int f2fs_compress_pages(struct
> compress_ctx
> > *cc)
> >
> >   	for (i = 0; i < cc->nr_cpages; i++) {
> >   		if (i < new_nr_cpages) {
> > -			new_cpages[i] = cc->cpages[i];
> >   			continue;
> >   		}
> 
> if (i < new_nr_cpages)
> 	continue;
> 
> >   		f2fs_compress_free_page(cc->cpages[i]);
> > @@ -711,9 +704,7 @@ static int f2fs_compress_pages(struct
> compress_ctx *cc)
> >   	if (cops->destroy_compress_ctx)
> >   		cops->destroy_compress_ctx(cc);
> >
> > -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> > -	cc->cpages = new_cpages;
> > -	cc->nr_cpages = new_nr_cpages;
> > +	cc->valid_nr_cpages = new_nr_cpages;
> >
> >   	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> >   							cc->clen, ret);
> > @@ -1288,14 +1279,14 @@ static int
> f2fs_write_compressed_pages(struct
> > compress_ctx *cc,
> >
> >   	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
> >   	cic->inode = inode;
> > -	atomic_set(&cic->pending_pages, cc->nr_cpages);
> > +	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
> >   	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> >   	if (!cic->rpages)
> >   		goto out_put_cic;
> >
> >   	cic->nr_rpages = cc->cluster_size;
> >
> > -	for (i = 0; i < cc->nr_cpages; i++) {
> > +	for (i = 0; i < cc->valid_nr_cpages; i++) {
> >   		f2fs_set_compressed_page(cc->cpages[i], inode,
> >   					cc->rpages[i + 1]->index, cic);
> >   		fio.compressed_page = cc->cpages[i]; @@ -1340,7 +1331,7
> @@ static
> > int f2fs_write_compressed_pages(struct compress_ctx *cc,
> >   		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
> >   			fio.compr_blocks++;
> >
> > -		if (i > cc->nr_cpages) {
> > +		if (i > cc->valid_nr_cpages) {
> >   			if (__is_valid_data_blkaddr(blkaddr)) {
> >   				f2fs_invalidate_blocks(sbi, blkaddr);
> >   				f2fs_update_data_blkaddr(&dn, NEW_ADDR); @@
> -1365,8 +1356,8 @@
> > static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> >
> >   	if (fio.compr_blocks)
> >   		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1,
> false);
> > -	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
> > -	add_compr_block_stat(inode, cc->nr_cpages);
> > +	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
> > +	add_compr_block_stat(inode, cc->valid_nr_cpages);
> >
> >   	set_inode_flag(cc->inode, FI_APPEND_WRITE);
> >   	if (cc->cluster_idx == 0)
> > @@ -1404,7 +1395,7 @@ static int f2fs_write_compressed_pages(struct
> compress_ctx *cc,
> >   	else
> >   		f2fs_unlock_op(sbi);
> >   out_free:
> > -	for (i = 0; i < cc->nr_cpages; i++) {
> > +	for (i = 0; i < cc->valid_nr_cpages; i++) {
> >   		if (!cc->cpages[i])
> >   			continue;
> 
> We can remove above cpages check?
> 
> Thanks,
> 
> >   		f2fs_compress_free_page(cc->cpages[i]);
> > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index
> > f4fd6c246c9a..4ddc0ba0f2c0 100644
> > --- a/fs/f2fs/data.c
> > +++ b/fs/f2fs/data.c
> > @@ -2934,6 +2934,7 @@ static int f2fs_write_cache_pages(struct
> address_space *mapping,
> >   		.rpages = NULL,
> >   		.nr_rpages = 0,
> >   		.cpages = NULL,
> > +		.valid_nr_cpages = 0,
> >   		.rbuf = NULL,
> >   		.cbuf = NULL,
> >   		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size, diff --git
> > a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 039a229e11c9..acb587f054db
> > 100644
> > --- a/fs/f2fs/f2fs.h
> > +++ b/fs/f2fs/f2fs.h
> > @@ -1481,6 +1481,7 @@ struct compress_ctx {
> >   	unsigned int nr_rpages;		/* total page number in rpages
> */
> >   	struct page **cpages;		/* pages store compressed data in
> cluster */
> >   	unsigned int nr_cpages;		/* total page number in cpages
> */
> > +	unsigned int valid_nr_cpages;	/* valid page number in cpages */
> >   	void *rbuf;			/* virtual mapped address on rpages */
> >   	struct compress_data *cbuf;	/* virtual mapped address on
> cpages */
> >   	size_t rlen;			/* valid data length in rbuf */
> >


* Re: [f2fs-dev] [PATCH v2] f2fs: compress: reduce one page array alloc and free when write compressed page
  2021-11-10  2:34   ` 常凤楠
@ 2021-11-10  2:39     ` Chao Yu
  0 siblings, 0 replies; 4+ messages in thread
From: Chao Yu @ 2021-11-10  2:39 UTC (permalink / raw)
  To: 常凤楠, jaegeuk; +Cc: linux-f2fs-devel

On 2021/11/10 10:34, 常凤楠 wrote:
> 
> 
>> -----Original Message-----
>> From: changfengnan@vivo.com <changfengnan@vivo.com> On Behalf Of
>> Chao Yu
>> Sent: Tuesday, November 9, 2021 10:11 PM
>> To: 常凤楠 <changfengnan@vivo.com>; jaegeuk@kernel.org
>> Cc: linux-f2fs-devel@lists.sourceforge.net
>> Subject: Re: [PATCH v2] f2fs: compress: reduce one page array alloc and
>> free when write compressed page
>>
>> On 2021/11/9 11:54, Fengnan Chang wrote:
>>> Don't alloc new page pointers array to replace old, just use old,
>>> introduce valid_nr_cpages to indicate valid number of page pointers in
>>> array, try to reduce one page array alloc and free when write compress
>> page.
>>>
>>> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
>>> ---
>>>    fs/f2fs/compress.c | 27 +++++++++------------------
>>>    fs/f2fs/data.c     |  1 +
>>>    fs/f2fs/f2fs.h     |  1 +
>>>    3 files changed, 11 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index
>>> 9b663eaf4805..28785dd78c6f 100644
>>> --- a/fs/f2fs/compress.c
>>> +++ b/fs/f2fs/compress.c
>>> @@ -153,6 +153,7 @@ void f2fs_destroy_compress_ctx(struct
>> compress_ctx *cc, bool reuse)
>>>    	cc->rpages = NULL;
>>>    	cc->nr_rpages = 0;
>>>    	cc->nr_cpages = 0;
>>> +	cc->valid_nr_cpages = 0;
>>>    	if (!reuse)
>>>    		cc->cluster_idx = NULL_CLUSTER;
>>>    }
>>> @@ -619,7 +620,6 @@ static int f2fs_compress_pages(struct
>> compress_ctx *cc)
>>>    	const struct f2fs_compress_ops *cops =
>>>    				f2fs_cops[fi->i_compress_algorithm];
>>>    	unsigned int max_len, new_nr_cpages;
>>> -	struct page **new_cpages;
>>>    	u32 chksum = 0;
>>>    	int i, ret;
>>>
>>> @@ -634,6 +634,7 @@ static int f2fs_compress_pages(struct
>> compress_ctx
>>> *cc)
>>>
>>>    	max_len = COMPRESS_HEADER_SIZE + cc->clen;
>>>    	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>>> +	cc->valid_nr_cpages = cc->nr_cpages;
>>>
>>>    	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>>>    	if (!cc->cpages) {
>>> @@ -684,13 +685,6 @@ static int f2fs_compress_pages(struct
>>> compress_ctx *cc)
>>>
>>>    	new_nr_cpages = DIV_ROUND_UP(cc->clen +
>> COMPRESS_HEADER_SIZE,
>>> PAGE_SIZE);
>>>
>>> -	/* Now we're going to cut unnecessary tail pages */
>>> -	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>>> -	if (!new_cpages) {
>>> -		ret = -ENOMEM;
>>> -		goto out_vunmap_cbuf;
>>
>> It removes last user of out_vunmap_cbuf label, so we can remove below
>> dead codes in error path as well.
>>
>> out_vunmap_cbuf:
>> 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
> 
> Not right, there still other code use this.

Oh, right. :)

Thanks,

> 
> Thanks.
>>
>>> -	}
>>> -
>>>    	/* zero out any unused part of the last page */
>>>    	memset(&cc->cbuf->cdata[cc->clen], 0,
>>>    			(new_nr_cpages * PAGE_SIZE) -
>>> @@ -701,7 +695,6 @@ static int f2fs_compress_pages(struct
>> compress_ctx
>>> *cc)
>>>
>>>    	for (i = 0; i < cc->nr_cpages; i++) {
>>>    		if (i < new_nr_cpages) {
>>> -			new_cpages[i] = cc->cpages[i];
>>>    			continue;
>>>    		}
>>
>> if (i < new_nr_cpages)
>> 	continue;
>>
>>>    		f2fs_compress_free_page(cc->cpages[i]);
>>> @@ -711,9 +704,7 @@ static int f2fs_compress_pages(struct
>> compress_ctx *cc)
>>>    	if (cops->destroy_compress_ctx)
>>>    		cops->destroy_compress_ctx(cc);
>>>
>>> -	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>> -	cc->cpages = new_cpages;
>>> -	cc->nr_cpages = new_nr_cpages;
>>> +	cc->valid_nr_cpages = new_nr_cpages;
>>>
>>>    	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>>>    							cc->clen, ret);
>>> @@ -1288,14 +1279,14 @@ static int
>> f2fs_write_compressed_pages(struct
>>> compress_ctx *cc,
>>>
>>>    	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
>>>    	cic->inode = inode;
>>> -	atomic_set(&cic->pending_pages, cc->nr_cpages);
>>> +	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
>>>    	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>>>    	if (!cic->rpages)
>>>    		goto out_put_cic;
>>>
>>>    	cic->nr_rpages = cc->cluster_size;
>>>
>>> -	for (i = 0; i < cc->nr_cpages; i++) {
>>> +	for (i = 0; i < cc->valid_nr_cpages; i++) {
>>>    		f2fs_set_compressed_page(cc->cpages[i], inode,
>>>    					cc->rpages[i + 1]->index, cic);
>>>    		fio.compressed_page = cc->cpages[i]; @@ -1340,7 +1331,7
>> @@ static
>>> int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>    		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
>>>    			fio.compr_blocks++;
>>>
>>> -		if (i > cc->nr_cpages) {
>>> +		if (i > cc->valid_nr_cpages) {
>>>    			if (__is_valid_data_blkaddr(blkaddr)) {
>>>    				f2fs_invalidate_blocks(sbi, blkaddr);
>>>    				f2fs_update_data_blkaddr(&dn, NEW_ADDR); @@
>> -1365,8 +1356,8 @@
>>> static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>
>>>    	if (fio.compr_blocks)
>>>    		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1,
>> false);
>>> -	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
>>> -	add_compr_block_stat(inode, cc->nr_cpages);
>>> +	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
>>> +	add_compr_block_stat(inode, cc->valid_nr_cpages);
>>>
>>>    	set_inode_flag(cc->inode, FI_APPEND_WRITE);
>>>    	if (cc->cluster_idx == 0)
>>> @@ -1404,7 +1395,7 @@ static int f2fs_write_compressed_pages(struct
>> compress_ctx *cc,
>>>    	else
>>>    		f2fs_unlock_op(sbi);
>>>    out_free:
>>> -	for (i = 0; i < cc->nr_cpages; i++) {
>>> +	for (i = 0; i < cc->valid_nr_cpages; i++) {
>>>    		if (!cc->cpages[i])
>>>    			continue;
>>
>> We can remove above cpages check?
>>
>> Thanks,
>>
>>>    		f2fs_compress_free_page(cc->cpages[i]);
>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index
>>> f4fd6c246c9a..4ddc0ba0f2c0 100644
>>> --- a/fs/f2fs/data.c
>>> +++ b/fs/f2fs/data.c
>>> @@ -2934,6 +2934,7 @@ static int f2fs_write_cache_pages(struct
>> address_space *mapping,
>>>    		.rpages = NULL,
>>>    		.nr_rpages = 0,
>>>    		.cpages = NULL,
>>> +		.valid_nr_cpages = 0,
>>>    		.rbuf = NULL,
>>>    		.cbuf = NULL,
>>>    		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size, diff --git
>>> a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 039a229e11c9..acb587f054db
>>> 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -1481,6 +1481,7 @@ struct compress_ctx {
>>>    	unsigned int nr_rpages;		/* total page number in rpages
>> */
>>>    	struct page **cpages;		/* pages store compressed data in
>> cluster */
>>>    	unsigned int nr_cpages;		/* total page number in cpages
>> */
>>> +	unsigned int valid_nr_cpages;	/* valid page number in cpages */
>>>    	void *rbuf;			/* virtual mapped address on rpages */
>>>    	struct compress_data *cbuf;	/* virtual mapped address on
>> cpages */
>>>    	size_t rlen;			/* valid data length in rbuf */
>>>

