All of lore.kernel.org
 help / color / mirror / Atom feed
* [f2fs-dev] [PATCH v2] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file
@ 2021-08-11  8:40 Fengnan Chang
  2021-08-11 13:11 ` Chao Yu
  0 siblings, 1 reply; 2+ messages in thread
From: Fengnan Chang @ 2021-08-11  8:40 UTC (permalink / raw)
  To: jaegeuk, chao, linux-f2fs-devel; +Cc: Fengnan Chang

Since a cluster is the basic unit of compression, an entire cluster is either
compressed or not. Therefore, we only need to check for valid blocks on the
first page of each cluster; the remaining pages in the cluster can be skipped.

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
---
 fs/f2fs/compress.c |  1 +
 fs/f2fs/data.c     | 21 ++++++++++++++++-----
 fs/f2fs/f2fs.h     |  1 +
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7dbfd6965b97..71768f15752a 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -152,6 +152,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 	cc->rpages = NULL;
 	cc->nr_rpages = 0;
 	cc->nr_cpages = 0;
+	cc->nc_cluster_idx = NULL_CLUSTER;
 	if (!reuse)
 		cc->cluster_idx = NULL_CLUSTER;
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index df5e8d8c654e..3ee1a88d8400 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2294,6 +2294,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
 		.cluster_size = F2FS_I(inode)->i_cluster_size,
 		.cluster_idx = NULL_CLUSTER,
+		.nc_cluster_idx = NULL_CLUSTER,
 		.rpages = NULL,
 		.cpages = NULL,
 		.nr_rpages = 0,
@@ -2331,12 +2332,22 @@ static int f2fs_mpage_readpages(struct inode *inode,
 				if (ret)
 					goto set_error_page;
 			}
-			ret = f2fs_is_compressed_cluster(inode, page->index);
-			if (ret < 0)
-				goto set_error_page;
-			else if (!ret)
-				goto read_single_page;
+			if (cc.cluster_idx == NULL_CLUSTER) {
+				if (cc.nc_cluster_idx != NULL_CLUSTER &&
+					cc.nc_cluster_idx == page->index >> cc.log_cluster_size) {
+					goto read_single_page;
+				}
+
+				ret = f2fs_is_compressed_cluster(inode, page->index);
+				if (ret < 0)
+					goto set_error_page;
+				else if (!ret) {
+					cc.nc_cluster_idx = page->index >> cc.log_cluster_size;
+					goto read_single_page;
+				}
 
+				cc.nc_cluster_idx = NULL_CLUSTER;
+			}
 			ret = f2fs_init_compress_ctx(&cc);
 			if (ret)
 				goto set_error_page;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e97b4d8c5efc..e9b6890a3f19 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1471,6 +1471,7 @@ struct compress_data {
 struct compress_ctx {
 	struct inode *inode;		/* inode the context belong to */
 	pgoff_t cluster_idx;		/* cluster index number */
+	pgoff_t nc_cluster_idx;		/* cluster index number for non-compressed cluster use*/
 	unsigned int cluster_size;	/* page count in cluster */
 	unsigned int log_cluster_size;	/* log of cluster size */
 	struct page **rpages;		/* pages store raw data in cluster */
-- 
2.32.0



_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [f2fs-dev] [PATCH v2] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file
  2021-08-11  8:40 [f2fs-dev] [PATCH v2] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file Fengnan Chang
@ 2021-08-11 13:11 ` Chao Yu
  0 siblings, 0 replies; 2+ messages in thread
From: Chao Yu @ 2021-08-11 13:11 UTC (permalink / raw)
  To: Fengnan Chang, jaegeuk, linux-f2fs-devel

On 2021/8/11 16:40, Fengnan Chang wrote:
> Since a cluster is the basic unit of compression, an entire cluster is either
> compressed or not. Therefore, we only need to check for valid blocks on the
> first page of each cluster; the remaining pages in the cluster can be skipped.
> 
> Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
> ---
>   fs/f2fs/compress.c |  1 +
>   fs/f2fs/data.c     | 21 ++++++++++++++++-----
>   fs/f2fs/f2fs.h     |  1 +
>   3 files changed, 18 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 7dbfd6965b97..71768f15752a 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -152,6 +152,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
>   	cc->rpages = NULL;
>   	cc->nr_rpages = 0;
>   	cc->nr_cpages = 0;
> +	cc->nc_cluster_idx = NULL_CLUSTER;
>   	if (!reuse)
>   		cc->cluster_idx = NULL_CLUSTER;
>   }
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index df5e8d8c654e..3ee1a88d8400 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2294,6 +2294,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
>   		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
>   		.cluster_size = F2FS_I(inode)->i_cluster_size,
>   		.cluster_idx = NULL_CLUSTER,
> +		.nc_cluster_idx = NULL_CLUSTER,
>   		.rpages = NULL,
>   		.cpages = NULL,
>   		.nr_rpages = 0,

pgoff_t nc_cluster_idx = NULL_CLUSTER;

> @@ -2331,12 +2332,22 @@ static int f2fs_mpage_readpages(struct inode *inode,
>   				if (ret)
>   					goto set_error_page;
>   			}
> -			ret = f2fs_is_compressed_cluster(inode, page->index);
> -			if (ret < 0)
> -				goto set_error_page;
> -			else if (!ret)
> -				goto read_single_page;
> +			if (cc.cluster_idx == NULL_CLUSTER) {
> +				if (cc.nc_cluster_idx != NULL_CLUSTER &&
> +					cc.nc_cluster_idx == page->index >> cc.log_cluster_size) {

				 if (nc_cluster_idx ==
					page->index >> cc.log_cluster_size)

> +					goto read_single_page;
> +				}
> +
> +				ret = f2fs_is_compressed_cluster(inode, page->index);
> +				if (ret < 0)
> +					goto set_error_page;
> +				else if (!ret) {
> +					cc.nc_cluster_idx = page->index >> cc.log_cluster_size;

					nc_cluster_idx =
						page->index >> cc.log_cluster_size;

> +					goto read_single_page;
> +				}
>   
> +				cc.nc_cluster_idx = NULL_CLUSTER;
> +			}
>   			ret = f2fs_init_compress_ctx(&cc);
>   			if (ret)
>   				goto set_error_page;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index e97b4d8c5efc..e9b6890a3f19 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1471,6 +1471,7 @@ struct compress_data {
>   struct compress_ctx {
>   	struct inode *inode;		/* inode the context belong to */
>   	pgoff_t cluster_idx;		/* cluster index number */
> +	pgoff_t nc_cluster_idx;		/* cluster index number for non-compressed cluster use*/

We only use this field in the read flow, so please add a local variable instead
to record the cluster index of the last non-compressed cluster.

Thanks,

>   	unsigned int cluster_size;	/* page count in cluster */
>   	unsigned int log_cluster_size;	/* log of cluster size */
>   	struct page **rpages;		/* pages store raw data in cluster */
> 


_______________________________________________
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2021-08-11 13:11 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-11  8:40 [f2fs-dev] [PATCH v2] f2fs: compress: avoid duplicate counting of valid blocks when read compressed file Fengnan Chang
2021-08-11 13:11 ` Chao Yu

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.