* [PATCH] f2fs: introduce excess_dirty_threshold()
From: Chao Yu @ 2021-09-01  7:32 UTC
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu, Chao Yu

This patch enables f2fs_balance_fs_bg() to check the dirty thresholds of
all metadata types rather than just node blocks, so that background
checkpoint() can be triggered more frequently to avoid accumulating too
much dirty metadata.

Default threshold values:
race with foreground ops	single type	global
No				16MB		24MB
Yes				24MB		36MB
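
For reference, assuming the default 2MB segment size (blocks_per_seg = 512
with 4KB blocks), these defaults follow from the helper below:

  single type = blocks_per_seg * factor * DEFAULT_DIRTY_THRESHOLD
              = 512 * 2 * 4 * 4KB = 16MB   (factor = 2, no race)
              = 512 * 3 * 4 * 4KB = 24MB   (factor = 3, racing)
  global      = single type * 3 / 2        = 24MB / 36MB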

In addition, let f2fs_balance_fs_bg() be aware of roll-forward space,
as fsync() already is.

Signed-off-by: Chao Yu <chao@kernel.org>
---
 fs/f2fs/f2fs.h    |  3 +++
 fs/f2fs/node.h    |  5 -----
 fs/f2fs/segment.c | 23 +++++++++++++++++++++--
 3 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6c5e75f86da4..5ae2ca6dba96 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -562,6 +562,9 @@ enum {
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
+/* dirty segments threshold for triggering CP */
+#define DEFAULT_DIRTY_THRESHOLD		4
+
 /* for in-memory extent cache entry */
 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
 
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index ff14a6e5ac1c..18b98cf0465b 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -138,11 +138,6 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
 	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
 }
 
-static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
-{
-	return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
-}
-
 enum mem_type {
 	FREE_NIDS,	/* indicates the free nid list */
 	NAT_ENTRIES,	/* indicates the cached nat entry */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7358342652ec..ffd148429a9d 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -535,6 +535,25 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	}
 }
 
+static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
+{
+	int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
+	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
+	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
+	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
+	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
+	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+	unsigned int threshold = sbi->blocks_per_seg * factor *
+					DEFAULT_DIRTY_THRESHOLD;
+	unsigned int global_threshold = threshold * 3 / 2;
+
+	if (dents >= threshold || qdata >= threshold ||
+		nodes >= threshold || meta >= threshold ||
+		imeta >= threshold)
+		return true;
+	return dents + qdata + nodes + meta + imeta >  global_threshold;
+}
+
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
 {
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -553,8 +572,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
 	else
 		f2fs_build_free_nids(sbi, false, false);
 
-	if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
-		excess_prefree_segs(sbi))
+	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
+		excess_prefree_segs(sbi) || f2fs_space_for_roll_forward(sbi))
 		goto do_sync;
 
 	/* there is background inflight IO or foreground operation recently */
-- 
2.32.0


* Re: [PATCH] f2fs: introduce excess_dirty_threshold()
From: Jaegeuk Kim @ 2021-09-10 22:04 UTC
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

On 09/01, Chao Yu wrote:
> This patch enables f2fs_balance_fs_bg() to check the dirty thresholds of
> all metadata types rather than just node blocks, so that background
> checkpoint() can be triggered more frequently to avoid accumulating too
> much dirty metadata.
> 
> Default threshold values:
> race with foreground ops	single type	global
> No				16MB		24MB
> Yes				24MB		36MB
> 
> In addition, let f2fs_balance_fs_bg() be aware of roll-forward space,
> as fsync() already is.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
>  fs/f2fs/f2fs.h    |  3 +++
>  fs/f2fs/node.h    |  5 -----
>  fs/f2fs/segment.c | 23 +++++++++++++++++++++--
>  3 files changed, 24 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 6c5e75f86da4..5ae2ca6dba96 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -562,6 +562,9 @@ enum {
>  
>  #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
>  
> +/* dirty segments threshold for triggering CP */
> +#define DEFAULT_DIRTY_THRESHOLD		4
> +
>  /* for in-memory extent cache entry */
>  #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
>  
> diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
> index ff14a6e5ac1c..18b98cf0465b 100644
> --- a/fs/f2fs/node.h
> +++ b/fs/f2fs/node.h
> @@ -138,11 +138,6 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
>  	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
>  }
>  
> -static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
> -{
> -	return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
> -}
> -
>  enum mem_type {
>  	FREE_NIDS,	/* indicates the free nid list */
>  	NAT_ENTRIES,	/* indicates the cached nat entry */
> diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> index 7358342652ec..ffd148429a9d 100644
> --- a/fs/f2fs/segment.c
> +++ b/fs/f2fs/segment.c
> @@ -535,6 +535,25 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
>  	}
>  }
>  
> +static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
> +{
> +	int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
> +	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
> +	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
> +	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
> +	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
> +	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
> +	unsigned int threshold = sbi->blocks_per_seg * factor *
> +					DEFAULT_DIRTY_THRESHOLD;
> +	unsigned int global_threshold = threshold * 3 / 2;
> +
> +	if (dents >= threshold || qdata >= threshold ||
> +		nodes >= threshold || meta >= threshold ||
> +		imeta >= threshold)
> +		return true;
> +	return dents + qdata + nodes + meta + imeta >  global_threshold;
> +}
> +
>  void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
>  {
>  	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
> @@ -553,8 +572,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
>  	else
>  		f2fs_build_free_nids(sbi, false, false);
>  
> -	if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
> -		excess_prefree_segs(sbi))
> +	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
> +		excess_prefree_segs(sbi) || f2fs_space_for_roll_forward(sbi))

Should this be f2fs_space_for_roll_forward() == 0, i.e. trigger the sync
only when roll-forward space has run out?


>  		goto do_sync;
>  
>  	/* there is background inflight IO or foreground operation recently */
> -- 
> 2.32.0

