* [f2fs-dev] [PATCH v3] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file
From: Fengnan Chang @ 2021-08-12  3:05 UTC (permalink / raw)
  To: jaegeuk, chao, linux-f2fs-devel; +Cc: Fengnan Chang

Since a cluster is the basic unit of compression, a whole cluster is either
compressed or not.  Therefore we only need to count the valid blocks (i.e.
check whether the cluster is compressed) for the first page of each cluster;
the other pages in the same cluster can skip the check.

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
---
 fs/f2fs/data.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index df5e8d8c654e..b06a0e5de1f1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2299,6 +2299,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 		.nr_rpages = 0,
 		.nr_cpages = 0,
 	};
+	pgoff_t nc_cluster_idx = NULL_CLUSTER;
 #endif
 	unsigned nr_pages = rac ? readahead_count(rac) : 1;
 	unsigned max_nr_pages = nr_pages;
@@ -2328,15 +2329,26 @@ static int f2fs_mpage_readpages(struct inode *inode,
 							&last_block_in_bio,
 							rac != NULL, false);
 				f2fs_destroy_compress_ctx(&cc, false);
+				nc_cluster_idx = NULL_CLUSTER;
 				if (ret)
 					goto set_error_page;
 			}
-			ret = f2fs_is_compressed_cluster(inode, page->index);
-			if (ret < 0)
-				goto set_error_page;
-			else if (!ret)
-				goto read_single_page;
+			if (cc.cluster_idx == NULL_CLUSTER) {
+				if (nc_cluster_idx != NULL_CLUSTER &&
+					nc_cluster_idx == page->index >> cc.log_cluster_size) {
+					goto read_single_page;
+				}
+
+				ret = f2fs_is_compressed_cluster(inode, page->index);
+				if (ret < 0)
+					goto set_error_page;
+				else if (!ret) {
+					nc_cluster_idx = page->index >> cc.log_cluster_size;
+					goto read_single_page;
+				}
 
+				nc_cluster_idx = NULL_CLUSTER;
+			}
 			ret = f2fs_init_compress_ctx(&cc);
 			if (ret)
 				goto set_error_page;
@@ -2373,6 +2385,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 							&last_block_in_bio,
 							rac != NULL, false);
 				f2fs_destroy_compress_ctx(&cc, false);
+				nc_cluster_idx = NULL_CLUSTER;
 			}
 		}
 #endif
-- 
2.32.0



* Re: [f2fs-dev] [PATCH v3] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file
From: Fengnan Chang @ 2021-08-12  3:09 UTC (permalink / raw)
  To: jaegeuk, chao, linux-f2fs-devel

My mistake, please ignore this patch...
