linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
@ 2017-03-01  9:10 Chao Yu
  2017-03-01 13:09 ` [f2fs-dev] " Kinglong Mee
  2017-03-02 19:10 ` Jaegeuk Kim
  0 siblings, 2 replies; 6+ messages in thread
From: Chao Yu @ 2017-03-01  9:10 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, chao, Chao Yu

Both nat_bits cache and free_nid_bitmap cache provide same functionality
as an intermediate cache between free nid cache and disk, but with
different granularity of indicating free nid range, and different
persistence policy. nat_bits cache provides better persistence ability,
and free_nid_bitmap provides better granularity.

In this patch we combine advantage of both caches, so finally policy of
the intermediate cache would be:
- init: load free nid status from nat_bits into free_nid_bitmap
- lookup: scan free_nid_bitmap before load NAT blocks
- update: update free_nid_bitmap in real-time
- persistence: update and persist nat_bits in checkpoint

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
 1 file changed, 39 insertions(+), 70 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 1a759d45b7e4..6c027b6833f4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 		set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
-	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
-		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
-
 	/* update fsync_mark if its inode nat entry is still alive */
 	if (ni->nid != ni->ino)
 		e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
 	up_read(&nm_i->nat_tree_lock);
 }
 
-static int scan_nat_bits(struct f2fs_sb_info *sbi)
-{
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct page *page;
-	unsigned int i = 0;
-	nid_t nid;
-
-	if (!enabled_nat_bits(sbi, NULL))
-		return -EAGAIN;
-
-	down_read(&nm_i->nat_tree_lock);
-check_empty:
-	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
-	if (i >= nm_i->nat_blocks) {
-		i = 0;
-		goto check_partial;
-	}
-
-	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
-									nid++) {
-		if (unlikely(nid >= nm_i->max_nid))
-			break;
-		add_free_nid(sbi, nid, true);
-	}
-
-	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
-		goto out;
-	i++;
-	goto check_empty;
-
-check_partial:
-	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
-	if (i >= nm_i->nat_blocks) {
-		disable_nat_bits(sbi, true);
-		up_read(&nm_i->nat_tree_lock);
-		return -EINVAL;
-	}
-
-	nid = i * NAT_ENTRY_PER_BLOCK;
-	page = get_current_nat_page(sbi, nid);
-	scan_nat_page(sbi, page, nid);
-	f2fs_put_page(page, 1);
-
-	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
-		i++;
-		goto check_partial;
-	}
-out:
-	up_read(&nm_i->nat_tree_lock);
-	return 0;
-}
-
 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 
 		if (nm_i->nid_cnt[FREE_NID_LIST])
 			return;
-
-		/* try to find free nids with nat_bits */
-		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-			return;
-	}
-
-	/* find next valid candidate */
-	if (enabled_nat_bits(sbi, NULL)) {
-		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
-					nm_i->nat_blocks, 0);
-
-		if (idx >= nm_i->nat_blocks)
-			set_sbi_flag(sbi, SBI_NEED_FSCK);
-		else
-			nid = idx * NAT_ENTRY_PER_BLOCK;
 	}
 
 	/* readahead nat pages to be scanned */
@@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
+inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	unsigned int i = 0;
+	nid_t nid, last_nid;
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+		if (i >= nm_i->nat_blocks)
+			break;
+
+		set_bit_le(i, nm_i->nat_block_bitmap);
+
+		nid = i * NAT_ENTRY_PER_BLOCK;
+		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+		for (; nid < last_nid; nid++)
+			update_free_nid_bitmap(sbi, nid, true, true);
+	}
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+		if (i >= nm_i->nat_blocks)
+			break;
+
+		set_bit_le(i, nm_i->nat_block_bitmap);
+
+		nid = i * NAT_ENTRY_PER_BLOCK;
+		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+		for (; nid < last_nid; nid++)
+			update_free_nid_bitmap(sbi, nid, false, true);
+	}
+}
+
 static int init_node_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
 
 	spin_lock_init(&nm_i->free_nid_lock);
 
+	/* load free nid status from nat_bits table */
+	if (enabled_nat_bits(sbi, NULL))
+		load_free_nid_bitmap(sbi);
+
 	return 0;
 }
 
-- 
2.8.2.295.g3f1c1d0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [f2fs-dev] [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
  2017-03-01  9:10 [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache Chao Yu
@ 2017-03-01 13:09 ` Kinglong Mee
  2017-03-02  1:35   ` Chao Yu
  2017-03-02 19:10 ` Jaegeuk Kim
  1 sibling, 1 reply; 6+ messages in thread
From: Kinglong Mee @ 2017-03-01 13:09 UTC (permalink / raw)
  To: Chao Yu, jaegeuk; +Cc: chao, linux-kernel, linux-f2fs-devel, Kinglong Mee

On 3/1/2017 17:10, Chao Yu wrote:
> Both nat_bits cache and free_nid_bitmap cache provide same functionality
> as a intermediate cache between free nid cache and disk, but with
> different granularity of indicating free nid range, and different
> persistence policy. nat_bits cache provides better persistence ability,
> and free_nid_bitmap provides better granularity.
> 
> In this patch we combine advantage of both caches, so finally policy of
> the intermediate cache would be:
> - init: load free nid status from nat_bits into free_nid_bitmap
> - lookup: scan free_nid_bitmap before load NAT blocks

Why not scan the full_nat_bits/empty_nat_bits before loading NAT blocks here?
After an object shrinker runs, the cached free nids will be emptied quickly.

thanks,
Kinglong Mee

> - update: update free_nid_bitmap in real-time
> - persistence: udpate and persist nat_bits in checkpoint
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
>  1 file changed, 39 insertions(+), 70 deletions(-)
> 
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 1a759d45b7e4..6c027b6833f4 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
>  		set_nat_flag(e, IS_CHECKPOINTED, false);
>  	__set_nat_cache_dirty(nm_i, e);
>  
> -	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
> -		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
> -
>  	/* update fsync_mark if its inode nat entry is still alive */
>  	if (ni->nid != ni->ino)
>  		e = __lookup_nat_cache(nm_i, ni->ino);
> @@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
>  	up_read(&nm_i->nat_tree_lock);
>  }
>  
> -static int scan_nat_bits(struct f2fs_sb_info *sbi)
> -{
> -	struct f2fs_nm_info *nm_i = NM_I(sbi);
> -	struct page *page;
> -	unsigned int i = 0;
> -	nid_t nid;
> -
> -	if (!enabled_nat_bits(sbi, NULL))
> -		return -EAGAIN;
> -
> -	down_read(&nm_i->nat_tree_lock);
> -check_empty:
> -	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
> -	if (i >= nm_i->nat_blocks) {
> -		i = 0;
> -		goto check_partial;
> -	}
> -
> -	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
> -									nid++) {
> -		if (unlikely(nid >= nm_i->max_nid))
> -			break;
> -		add_free_nid(sbi, nid, true);
> -	}
> -
> -	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
> -		goto out;
> -	i++;
> -	goto check_empty;
> -
> -check_partial:
> -	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
> -	if (i >= nm_i->nat_blocks) {
> -		disable_nat_bits(sbi, true);
> -		up_read(&nm_i->nat_tree_lock);
> -		return -EINVAL;
> -	}
> -
> -	nid = i * NAT_ENTRY_PER_BLOCK;
> -	page = get_current_nat_page(sbi, nid);
> -	scan_nat_page(sbi, page, nid);
> -	f2fs_put_page(page, 1);
> -
> -	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
> -		i++;
> -		goto check_partial;
> -	}
> -out:
> -	up_read(&nm_i->nat_tree_lock);
> -	return 0;
> -}
> -
>  static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>  {
>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
> @@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>  
>  		if (nm_i->nid_cnt[FREE_NID_LIST])
>  			return;
> -
> -		/* try to find free nids with nat_bits */
> -		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
> -			return;
> -	}
> -
> -	/* find next valid candidate */
> -	if (enabled_nat_bits(sbi, NULL)) {
> -		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
> -					nm_i->nat_blocks, 0);
> -
> -		if (idx >= nm_i->nat_blocks)
> -			set_sbi_flag(sbi, SBI_NEED_FSCK);
> -		else
> -			nid = idx * NAT_ENTRY_PER_BLOCK;
>  	}
>  
>  	/* readahead nat pages to be scanned */
> @@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
>  	return 0;
>  }
>  
> +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
> +{
> +	struct f2fs_nm_info *nm_i = NM_I(sbi);
> +	unsigned int i = 0;
> +	nid_t nid, last_nid;
> +
> +	for (i = 0; i < nm_i->nat_blocks; i++) {
> +		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
> +		if (i >= nm_i->nat_blocks)
> +			break;
> +
> +		set_bit_le(i, nm_i->nat_block_bitmap);
> +
> +		nid = i * NAT_ENTRY_PER_BLOCK;
> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
> +
> +		for (; nid < last_nid; nid++)
> +			update_free_nid_bitmap(sbi, nid, true, true);
> +	}
> +
> +	for (i = 0; i < nm_i->nat_blocks; i++) {
> +		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
> +		if (i >= nm_i->nat_blocks)
> +			break;
> +
> +		set_bit_le(i, nm_i->nat_block_bitmap);
> +
> +		nid = i * NAT_ENTRY_PER_BLOCK;
> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
> +
> +		for (; nid < last_nid; nid++)
> +			update_free_nid_bitmap(sbi, nid, false, true);
> +	}
> +}
> +
>  static int init_node_manager(struct f2fs_sb_info *sbi)
>  {
>  	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
> @@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
>  
>  	spin_lock_init(&nm_i->free_nid_lock);
>  
> +	/* load free nid status from nat_bits table */
> +	if (enabled_nat_bits(sbi, NULL))
> +		load_free_nid_bitmap(sbi);
> +
>  	return 0;
>  }
>  
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [f2fs-dev] [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
  2017-03-01 13:09 ` [f2fs-dev] " Kinglong Mee
@ 2017-03-02  1:35   ` Chao Yu
  2017-03-02  4:29     ` Kinglong Mee
  0 siblings, 1 reply; 6+ messages in thread
From: Chao Yu @ 2017-03-02  1:35 UTC (permalink / raw)
  To: Kinglong Mee, jaegeuk; +Cc: chao, linux-kernel, linux-f2fs-devel

On 2017/3/1 21:09, Kinglong Mee wrote:
> On 3/1/2017 17:10, Chao Yu wrote:
>> Both nat_bits cache and free_nid_bitmap cache provide same functionality
>> as a intermediate cache between free nid cache and disk, but with
>> different granularity of indicating free nid range, and different
>> persistence policy. nat_bits cache provides better persistence ability,
>> and free_nid_bitmap provides better granularity.
>>
>> In this patch we combine advantage of both caches, so finally policy of
>> the intermediate cache would be:
>> - init: load free nid status from nat_bits into free_nid_bitmap
>> - lookup: scan free_nid_bitmap before load NAT blocks
> 
> Why not scan the full_nat_bits/empty_nat_bits before load NAT blocks here?
> If after an objects shrinker, the cached free nid will be empty quickly.

Since after this patch, all nids status (free or used) of
full_nat_bits/empty_nat_bits will be loaded into free_nid_bitmap, so we can just
check free_nid_bitmap instead of both caches before loading NAT blocks.

Thanks,

> 
> thanks,
> Kinglong Mee
> 
>> - update: update free_nid_bitmap in real-time
>> - persistence: udpate and persist nat_bits in checkpoint
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
>>  1 file changed, 39 insertions(+), 70 deletions(-)
>>
>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>> index 1a759d45b7e4..6c027b6833f4 100644
>> --- a/fs/f2fs/node.c
>> +++ b/fs/f2fs/node.c
>> @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
>>  		set_nat_flag(e, IS_CHECKPOINTED, false);
>>  	__set_nat_cache_dirty(nm_i, e);
>>  
>> -	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
>> -		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
>> -
>>  	/* update fsync_mark if its inode nat entry is still alive */
>>  	if (ni->nid != ni->ino)
>>  		e = __lookup_nat_cache(nm_i, ni->ino);
>> @@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
>>  	up_read(&nm_i->nat_tree_lock);
>>  }
>>  
>> -static int scan_nat_bits(struct f2fs_sb_info *sbi)
>> -{
>> -	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> -	struct page *page;
>> -	unsigned int i = 0;
>> -	nid_t nid;
>> -
>> -	if (!enabled_nat_bits(sbi, NULL))
>> -		return -EAGAIN;
>> -
>> -	down_read(&nm_i->nat_tree_lock);
>> -check_empty:
>> -	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>> -	if (i >= nm_i->nat_blocks) {
>> -		i = 0;
>> -		goto check_partial;
>> -	}
>> -
>> -	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
>> -									nid++) {
>> -		if (unlikely(nid >= nm_i->max_nid))
>> -			break;
>> -		add_free_nid(sbi, nid, true);
>> -	}
>> -
>> -	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
>> -		goto out;
>> -	i++;
>> -	goto check_empty;
>> -
>> -check_partial:
>> -	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>> -	if (i >= nm_i->nat_blocks) {
>> -		disable_nat_bits(sbi, true);
>> -		up_read(&nm_i->nat_tree_lock);
>> -		return -EINVAL;
>> -	}
>> -
>> -	nid = i * NAT_ENTRY_PER_BLOCK;
>> -	page = get_current_nat_page(sbi, nid);
>> -	scan_nat_page(sbi, page, nid);
>> -	f2fs_put_page(page, 1);
>> -
>> -	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
>> -		i++;
>> -		goto check_partial;
>> -	}
>> -out:
>> -	up_read(&nm_i->nat_tree_lock);
>> -	return 0;
>> -}
>> -
>>  static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>  {
>>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> @@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>  
>>  		if (nm_i->nid_cnt[FREE_NID_LIST])
>>  			return;
>> -
>> -		/* try to find free nids with nat_bits */
>> -		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
>> -			return;
>> -	}
>> -
>> -	/* find next valid candidate */
>> -	if (enabled_nat_bits(sbi, NULL)) {
>> -		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
>> -					nm_i->nat_blocks, 0);
>> -
>> -		if (idx >= nm_i->nat_blocks)
>> -			set_sbi_flag(sbi, SBI_NEED_FSCK);
>> -		else
>> -			nid = idx * NAT_ENTRY_PER_BLOCK;
>>  	}
>>  
>>  	/* readahead nat pages to be scanned */
>> @@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
>>  	return 0;
>>  }
>>  
>> +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
>> +{
>> +	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> +	unsigned int i = 0;
>> +	nid_t nid, last_nid;
>> +
>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>> +		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>> +		if (i >= nm_i->nat_blocks)
>> +			break;
>> +
>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>> +
>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>> +
>> +		for (; nid < last_nid; nid++)
>> +			update_free_nid_bitmap(sbi, nid, true, true);
>> +	}
>> +
>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>> +		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>> +		if (i >= nm_i->nat_blocks)
>> +			break;
>> +
>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>> +
>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>> +
>> +		for (; nid < last_nid; nid++)
>> +			update_free_nid_bitmap(sbi, nid, false, true);
>> +	}
>> +}
>> +
>>  static int init_node_manager(struct f2fs_sb_info *sbi)
>>  {
>>  	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
>> @@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
>>  
>>  	spin_lock_init(&nm_i->free_nid_lock);
>>  
>> +	/* load free nid status from nat_bits table */
>> +	if (enabled_nat_bits(sbi, NULL))
>> +		load_free_nid_bitmap(sbi);
>> +
>>  	return 0;
>>  }
>>  
>>
> 
> .
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [f2fs-dev] [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
  2017-03-02  1:35   ` Chao Yu
@ 2017-03-02  4:29     ` Kinglong Mee
  0 siblings, 0 replies; 6+ messages in thread
From: Kinglong Mee @ 2017-03-02  4:29 UTC (permalink / raw)
  To: Chao Yu; +Cc: jaegeuk, chao, linux-kernel, linux-f2fs-devel, Kinglong Mee

On 3/2/2017 09:35, Chao Yu wrote:
> On 2017/3/1 21:09, Kinglong Mee wrote:
>> On 3/1/2017 17:10, Chao Yu wrote:
>>> Both nat_bits cache and free_nid_bitmap cache provide same functionality
>>> as a intermediate cache between free nid cache and disk, but with
>>> different granularity of indicating free nid range, and different
>>> persistence policy. nat_bits cache provides better persistence ability,
>>> and free_nid_bitmap provides better granularity.
>>>
>>> In this patch we combine advantage of both caches, so finally policy of
>>> the intermediate cache would be:
>>> - init: load free nid status from nat_bits into free_nid_bitmap
>>> - lookup: scan free_nid_bitmap before load NAT blocks
>>
>> Why not scan the full_nat_bits/empty_nat_bits before load NAT blocks here?
>> If after an objects shrinker, the cached free nid will be empty quickly.
> 
> Since after this patch, all nids status (free or used) of
> full_nat_bits/empty_nat_bits will be loaded into free_nid_bitmap, so we can just
> check free_nid_bitmap instead of both cache before loading NAT blocks.

Yes, you are right.
I forgot that f2fs also calls update_free_nid_bitmap in __flush_nat_entry_set.

thanks,
Kinglong Mee

>>> - update: update free_nid_bitmap in real-time
>>> - persistence: udpate and persist nat_bits in checkpoint
>>>
>>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>>> ---
>>>  fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
>>>  1 file changed, 39 insertions(+), 70 deletions(-)
>>>
>>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>>> index 1a759d45b7e4..6c027b6833f4 100644
>>> --- a/fs/f2fs/node.c
>>> +++ b/fs/f2fs/node.c
>>> @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
>>>  		set_nat_flag(e, IS_CHECKPOINTED, false);
>>>  	__set_nat_cache_dirty(nm_i, e);
>>>  
>>> -	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
>>> -		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
>>> -
>>>  	/* update fsync_mark if its inode nat entry is still alive */
>>>  	if (ni->nid != ni->ino)
>>>  		e = __lookup_nat_cache(nm_i, ni->ino);
>>> @@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
>>>  	up_read(&nm_i->nat_tree_lock);
>>>  }
>>>  
>>> -static int scan_nat_bits(struct f2fs_sb_info *sbi)
>>> -{
>>> -	struct f2fs_nm_info *nm_i = NM_I(sbi);
>>> -	struct page *page;
>>> -	unsigned int i = 0;
>>> -	nid_t nid;
>>> -
>>> -	if (!enabled_nat_bits(sbi, NULL))
>>> -		return -EAGAIN;
>>> -
>>> -	down_read(&nm_i->nat_tree_lock);
>>> -check_empty:
>>> -	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>>> -	if (i >= nm_i->nat_blocks) {
>>> -		i = 0;
>>> -		goto check_partial;
>>> -	}
>>> -
>>> -	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
>>> -									nid++) {
>>> -		if (unlikely(nid >= nm_i->max_nid))
>>> -			break;
>>> -		add_free_nid(sbi, nid, true);
>>> -	}
>>> -
>>> -	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
>>> -		goto out;
>>> -	i++;
>>> -	goto check_empty;
>>> -
>>> -check_partial:
>>> -	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>>> -	if (i >= nm_i->nat_blocks) {
>>> -		disable_nat_bits(sbi, true);
>>> -		up_read(&nm_i->nat_tree_lock);
>>> -		return -EINVAL;
>>> -	}
>>> -
>>> -	nid = i * NAT_ENTRY_PER_BLOCK;
>>> -	page = get_current_nat_page(sbi, nid);
>>> -	scan_nat_page(sbi, page, nid);
>>> -	f2fs_put_page(page, 1);
>>> -
>>> -	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
>>> -		i++;
>>> -		goto check_partial;
>>> -	}
>>> -out:
>>> -	up_read(&nm_i->nat_tree_lock);
>>> -	return 0;
>>> -}
>>> -
>>>  static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>>  {
>>>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
>>> @@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>>  
>>>  		if (nm_i->nid_cnt[FREE_NID_LIST])
>>>  			return;
>>> -
>>> -		/* try to find free nids with nat_bits */
>>> -		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
>>> -			return;
>>> -	}
>>> -
>>> -	/* find next valid candidate */
>>> -	if (enabled_nat_bits(sbi, NULL)) {
>>> -		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
>>> -					nm_i->nat_blocks, 0);
>>> -
>>> -		if (idx >= nm_i->nat_blocks)
>>> -			set_sbi_flag(sbi, SBI_NEED_FSCK);
>>> -		else
>>> -			nid = idx * NAT_ENTRY_PER_BLOCK;
>>>  	}
>>>  
>>>  	/* readahead nat pages to be scanned */
>>> @@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
>>>  	return 0;
>>>  }
>>>  
>>> +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
>>> +{
>>> +	struct f2fs_nm_info *nm_i = NM_I(sbi);
>>> +	unsigned int i = 0;
>>> +	nid_t nid, last_nid;
>>> +
>>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>>> +		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>>> +		if (i >= nm_i->nat_blocks)
>>> +			break;
>>> +
>>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>>> +
>>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>>> +
>>> +		for (; nid < last_nid; nid++)
>>> +			update_free_nid_bitmap(sbi, nid, true, true);
>>> +	}
>>> +
>>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>>> +		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>>> +		if (i >= nm_i->nat_blocks)
>>> +			break;
>>> +
>>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>>> +
>>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>>> +
>>> +		for (; nid < last_nid; nid++)
>>> +			update_free_nid_bitmap(sbi, nid, false, true);
>>> +	}
>>> +}
>>> +
>>>  static int init_node_manager(struct f2fs_sb_info *sbi)
>>>  {
>>>  	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
>>> @@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
>>>  
>>>  	spin_lock_init(&nm_i->free_nid_lock);
>>>  
>>> +	/* load free nid status from nat_bits table */
>>> +	if (enabled_nat_bits(sbi, NULL))
>>> +		load_free_nid_bitmap(sbi);
>>> +
>>>  	return 0;
>>>  }
>>>  
>>>
>>
>> .
>>
> 
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
  2017-03-01  9:10 [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache Chao Yu
  2017-03-01 13:09 ` [f2fs-dev] " Kinglong Mee
@ 2017-03-02 19:10 ` Jaegeuk Kim
  2017-03-06 10:31   ` Chao Yu
  1 sibling, 1 reply; 6+ messages in thread
From: Jaegeuk Kim @ 2017-03-02 19:10 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel, chao

Hi Chao,

On 03/01, Chao Yu wrote:
> Both nat_bits cache and free_nid_bitmap cache provide same functionality
> as a intermediate cache between free nid cache and disk, but with
> different granularity of indicating free nid range, and different
> persistence policy. nat_bits cache provides better persistence ability,
> and free_nid_bitmap provides better granularity.
> 
> In this patch we combine advantage of both caches, so finally policy of
> the intermediate cache would be:
> - init: load free nid status from nat_bits into free_nid_bitmap
> - lookup: scan free_nid_bitmap before load NAT blocks
> - update: update free_nid_bitmap in real-time
> - persistence: udpate and persist nat_bits in checkpoint
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
>  1 file changed, 39 insertions(+), 70 deletions(-)
> 
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 1a759d45b7e4..6c027b6833f4 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
>  		set_nat_flag(e, IS_CHECKPOINTED, false);
>  	__set_nat_cache_dirty(nm_i, e);
>  
> -	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
> -		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
> -
>  	/* update fsync_mark if its inode nat entry is still alive */
>  	if (ni->nid != ni->ino)
>  		e = __lookup_nat_cache(nm_i, ni->ino);
> @@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
>  	up_read(&nm_i->nat_tree_lock);
>  }
>  
> -static int scan_nat_bits(struct f2fs_sb_info *sbi)
> -{
> -	struct f2fs_nm_info *nm_i = NM_I(sbi);
> -	struct page *page;
> -	unsigned int i = 0;
> -	nid_t nid;
> -
> -	if (!enabled_nat_bits(sbi, NULL))
> -		return -EAGAIN;
> -
> -	down_read(&nm_i->nat_tree_lock);
> -check_empty:
> -	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
> -	if (i >= nm_i->nat_blocks) {
> -		i = 0;
> -		goto check_partial;
> -	}
> -
> -	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
> -									nid++) {
> -		if (unlikely(nid >= nm_i->max_nid))
> -			break;
> -		add_free_nid(sbi, nid, true);
> -	}
> -
> -	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
> -		goto out;
> -	i++;
> -	goto check_empty;
> -
> -check_partial:
> -	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
> -	if (i >= nm_i->nat_blocks) {
> -		disable_nat_bits(sbi, true);
> -		up_read(&nm_i->nat_tree_lock);
> -		return -EINVAL;
> -	}
> -
> -	nid = i * NAT_ENTRY_PER_BLOCK;
> -	page = get_current_nat_page(sbi, nid);
> -	scan_nat_page(sbi, page, nid);
> -	f2fs_put_page(page, 1);
> -
> -	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
> -		i++;
> -		goto check_partial;
> -	}
> -out:
> -	up_read(&nm_i->nat_tree_lock);
> -	return 0;
> -}
> -
>  static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>  {
>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
> @@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>  
>  		if (nm_i->nid_cnt[FREE_NID_LIST])
>  			return;
> -
> -		/* try to find free nids with nat_bits */
> -		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
> -			return;
> -	}
> -
> -	/* find next valid candidate */
> -	if (enabled_nat_bits(sbi, NULL)) {
> -		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
> -					nm_i->nat_blocks, 0);
> -
> -		if (idx >= nm_i->nat_blocks)
> -			set_sbi_flag(sbi, SBI_NEED_FSCK);
> -		else
> -			nid = idx * NAT_ENTRY_PER_BLOCK;
>  	}
>  
>  	/* readahead nat pages to be scanned */
> @@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
>  	return 0;
>  }
>  
> +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
> +{
> +	struct f2fs_nm_info *nm_i = NM_I(sbi);
> +	unsigned int i = 0;
> +	nid_t nid, last_nid;
> +
> +	for (i = 0; i < nm_i->nat_blocks; i++) {
> +		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
> +		if (i >= nm_i->nat_blocks)
> +			break;
> +
> +		set_bit_le(i, nm_i->nat_block_bitmap);
> +
> +		nid = i * NAT_ENTRY_PER_BLOCK;
> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
> +
> +		for (; nid < last_nid; nid++)
> +			update_free_nid_bitmap(sbi, nid, true, true);
> +	}
> +
> +	for (i = 0; i < nm_i->nat_blocks; i++) {
> +		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
> +		if (i >= nm_i->nat_blocks)
> +			break;
> +
> +		set_bit_le(i, nm_i->nat_block_bitmap);
> +
> +		nid = i * NAT_ENTRY_PER_BLOCK;
> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
> +
> +		for (; nid < last_nid; nid++)
> +			update_free_nid_bitmap(sbi, nid, false, true);
> +	}
> +}
> +
>  static int init_node_manager(struct f2fs_sb_info *sbi)
>  {
>  	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
> @@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
>  
>  	spin_lock_init(&nm_i->free_nid_lock);
>  
> +	/* load free nid status from nat_bits table */
> +	if (enabled_nat_bits(sbi, NULL))
> +		load_free_nid_bitmap(sbi);

This should be done after init_free_nid_cache() to avoid NULL pointer access.
And, it shows little bit long latency during mount, so needs to take a look at
bit ops.

Thanks,

> +
>  	return 0;
>  }
>  
> -- 
> 2.8.2.295.g3f1c1d0

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache
  2017-03-02 19:10 ` Jaegeuk Kim
@ 2017-03-06 10:31   ` Chao Yu
  0 siblings, 0 replies; 6+ messages in thread
From: Chao Yu @ 2017-03-06 10:31 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel, chao

Hi Jaegeuk,

On 2017/3/3 3:10, Jaegeuk Kim wrote:
> Hi Chao,
> 
> On 03/01, Chao Yu wrote:
>> Both nat_bits cache and free_nid_bitmap cache provide the same functionality
>> as an intermediate cache between free nid cache and disk, but with
>> different granularity of indicating free nid range, and different
>> persistence policy. nat_bits cache provides better persistence ability,
>> and free_nid_bitmap provides better granularity.
>>
>> In this patch we combine advantage of both caches, so finally policy of
>> the intermediate cache would be:
>> - init: load free nid status from nat_bits into free_nid_bitmap
>> - lookup: scan free_nid_bitmap before load NAT blocks
>> - update: update free_nid_bitmap in real-time
>> - persistence: update and persist nat_bits in checkpoint
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  fs/f2fs/node.c | 109 +++++++++++++++++++++------------------------------------
>>  1 file changed, 39 insertions(+), 70 deletions(-)
>>
>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>> index 1a759d45b7e4..6c027b6833f4 100644
>> --- a/fs/f2fs/node.c
>> +++ b/fs/f2fs/node.c
>> @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
>>  		set_nat_flag(e, IS_CHECKPOINTED, false);
>>  	__set_nat_cache_dirty(nm_i, e);
>>  
>> -	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
>> -		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
>> -
>>  	/* update fsync_mark if its inode nat entry is still alive */
>>  	if (ni->nid != ni->ino)
>>  		e = __lookup_nat_cache(nm_i, ni->ino);
>> @@ -1920,58 +1917,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
>>  	up_read(&nm_i->nat_tree_lock);
>>  }
>>  
>> -static int scan_nat_bits(struct f2fs_sb_info *sbi)
>> -{
>> -	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> -	struct page *page;
>> -	unsigned int i = 0;
>> -	nid_t nid;
>> -
>> -	if (!enabled_nat_bits(sbi, NULL))
>> -		return -EAGAIN;
>> -
>> -	down_read(&nm_i->nat_tree_lock);
>> -check_empty:
>> -	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>> -	if (i >= nm_i->nat_blocks) {
>> -		i = 0;
>> -		goto check_partial;
>> -	}
>> -
>> -	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
>> -									nid++) {
>> -		if (unlikely(nid >= nm_i->max_nid))
>> -			break;
>> -		add_free_nid(sbi, nid, true);
>> -	}
>> -
>> -	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
>> -		goto out;
>> -	i++;
>> -	goto check_empty;
>> -
>> -check_partial:
>> -	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>> -	if (i >= nm_i->nat_blocks) {
>> -		disable_nat_bits(sbi, true);
>> -		up_read(&nm_i->nat_tree_lock);
>> -		return -EINVAL;
>> -	}
>> -
>> -	nid = i * NAT_ENTRY_PER_BLOCK;
>> -	page = get_current_nat_page(sbi, nid);
>> -	scan_nat_page(sbi, page, nid);
>> -	f2fs_put_page(page, 1);
>> -
>> -	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
>> -		i++;
>> -		goto check_partial;
>> -	}
>> -out:
>> -	up_read(&nm_i->nat_tree_lock);
>> -	return 0;
>> -}
>> -
>>  static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>  {
>>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> @@ -1993,21 +1938,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
>>  
>>  		if (nm_i->nid_cnt[FREE_NID_LIST])
>>  			return;
>> -
>> -		/* try to find free nids with nat_bits */
>> -		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
>> -			return;
>> -	}
>> -
>> -	/* find next valid candidate */
>> -	if (enabled_nat_bits(sbi, NULL)) {
>> -		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
>> -					nm_i->nat_blocks, 0);
>> -
>> -		if (idx >= nm_i->nat_blocks)
>> -			set_sbi_flag(sbi, SBI_NEED_FSCK);
>> -		else
>> -			nid = idx * NAT_ENTRY_PER_BLOCK;
>>  	}
>>  
>>  	/* readahead nat pages to be scanned */
>> @@ -2590,6 +2520,41 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
>>  	return 0;
>>  }
>>  
>> +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
>> +{
>> +	struct f2fs_nm_info *nm_i = NM_I(sbi);
>> +	unsigned int i = 0;
>> +	nid_t nid, last_nid;
>> +
>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>> +		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
>> +		if (i >= nm_i->nat_blocks)
>> +			break;
>> +
>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>> +
>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>> +
>> +		for (; nid < last_nid; nid++)
>> +			update_free_nid_bitmap(sbi, nid, true, true);
>> +	}
>> +
>> +	for (i = 0; i < nm_i->nat_blocks; i++) {
>> +		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
>> +		if (i >= nm_i->nat_blocks)
>> +			break;
>> +
>> +		set_bit_le(i, nm_i->nat_block_bitmap);
>> +
>> +		nid = i * NAT_ENTRY_PER_BLOCK;
>> +		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>> +
>> +		for (; nid < last_nid; nid++)
>> +			update_free_nid_bitmap(sbi, nid, false, true);
>> +	}
>> +}
>> +
>>  static int init_node_manager(struct f2fs_sb_info *sbi)
>>  {
>>  	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
>> @@ -2672,6 +2637,10 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
>>  
>>  	spin_lock_init(&nm_i->free_nid_lock);
>>  
>> +	/* load free nid status from nat_bits table */
>> +	if (enabled_nat_bits(sbi, NULL))
>> +		load_free_nid_bitmap(sbi);
> 
> This should be done after init_free_nid_cache() to avoid NULL pointer access.

OK, let me move it to build_node_manager.

> And, it shows little bit long latency during mount, so needs to take a look at
> bit ops.

Could you describe your environment please?

The following optimizations may help improve the performance of free nid bitmap building:
a. use __set_bit_le instead of set_bit_le.
b. remove clear_bit_le since the bitmap was built zeroed.

Thanks,

> 
> Thanks,
> 
>> +
>>  	return 0;
>>  }
>>  
>> -- 
>> 2.8.2.295.g3f1c1d0
> 
> .
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-03-06 10:38 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-03-01  9:10 [PATCH RFC] f2fs: combine nat_bits and free_nid_bitmap cache Chao Yu
2017-03-01 13:09 ` [f2fs-dev] " Kinglong Mee
2017-03-02  1:35   ` Chao Yu
2017-03-02  4:29     ` Kinglong Mee
2017-03-02 19:10 ` Jaegeuk Kim
2017-03-06 10:31   ` Chao Yu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).