From 4734edcfb755096b1bcdd4153699ec907b072a35 Mon Sep 17 00:00:00 2001
From: Lokesh Nagappa Jaliminche
Date: Thu, 7 Jan 2016 05:12:20 +0530
Subject: [PATCH] ext4: optimize group search for inode allocation

Added a check at the start of the group search loop to avoid looping
unnecessarily in case of an empty group. This also allows the group
search to jump directly to "found_flex_bg" with "stats" and "group"
already set, so there is no need to go through the extra steps of
setting "best_desc", "best_group" and then breaking out of the loop
just to set "stats" and "group" again.

Signed-off-by: Lokesh Nagappa Jaliminche
---
 fs/ext4/ext4.h   |    2 ++
 fs/ext4/ialloc.c |   21 +++++++++++++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index cc7ca4e..fc48f9a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -326,6 +326,8 @@ struct flex_groups {
 #define EXT4_MAX_DESC_SIZE	EXT4_MIN_BLOCK_SIZE
 #define EXT4_DESC_SIZE(s)	(EXT4_SB(s)->s_desc_size)
 #ifdef __KERNEL__
+# define EXT4_INODE_TABLE_BLOCKS_PER_GROUP(s)	(EXT4_SB(s)->s_itb_per_group)
+# define EXT4_BLOCKS_PER_CLUSTER(s)	(EXT4_SB(s)->s_cluster_ratio)
 # define EXT4_BLOCKS_PER_GROUP(s)	(EXT4_SB(s)->s_blocks_per_group)
 # define EXT4_CLUSTERS_PER_GROUP(s)	(EXT4_SB(s)->s_clusters_per_group)
 # define EXT4_DESC_PER_BLOCK(s)	(EXT4_SB(s)->s_desc_per_block)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1b8024d..dc66ad8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -463,7 +463,6 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 			sbi->s_log_groups_per_flex;
 		parent_group >>= sbi->s_log_groups_per_flex;
 	}
-
 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
 	avefreei = freei / ngroups;
 	freeb = EXT4_C2B(sbi,
@@ -477,7 +476,20 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 	    (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
 		int best_ndir = inodes_per_group;
 		int ret = -1;
-
+		/* blocks used for inode table and allocation bitmaps */
+		unsigned int max_usable_blocks_per_flex_group;
+		unsigned int blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
+		unsigned int inode_table_blocks_per_group = \
+			EXT4_INODE_TABLE_BLOCKS_PER_GROUP(sb);
+		/* Number of blocks per cluster */
+		unsigned int cluster_ratio = EXT4_BLOCKS_PER_CLUSTER(sb);
+		unsigned int inodes_per_flex_group = inodes_per_group * \
+			flex_size;
+		max_usable_blocks_per_flex_group = \
+			((blocks_per_group*flex_size) - \
+			(inode_table_blocks_per_group * \
+			flex_size + 2 * flex_size)) / \
+			cluster_ratio;
 	if (qstr) {
 		hinfo.hash_version = DX_HASH_HALF_MD4;
 		hinfo.seed = sbi->s_hash_seed;
@@ -489,6 +501,11 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 		for (i = 0; i < ngroups; i++) {
 			g = (parent_group + i) % ngroups;
 			get_orlov_stats(sb, g, flex_size, &stats);
+			/* can't get better group than empty group */
+			if (max_usable_blocks_per_flex_group == \
+				stats.free_clusters && \
+				inodes_per_flex_group == stats.free_inodes)
+				goto found_flex_bg;
 			if (!stats.free_inodes)
 				continue;
 			if (stats.used_dirs >= best_ndir)
-- 
1.7.1