From mboxrd@z Thu Jan 1 00:00:00 1970
From: gaoxiang25@huawei.com (Gao Xiang)
Date: Fri, 2 Aug 2019 20:53:40 +0800
Subject: [PATCH v6 17/24] erofs: introduce per-CPU buffers implementation
In-Reply-To: <20190802125347.166018-1-gaoxiang25@huawei.com>
References: <20190802125347.166018-1-gaoxiang25@huawei.com>
Message-ID: <20190802125347.166018-18-gaoxiang25@huawei.com>

This patch introduces per-CPU buffers for the upcoming
generic decompression framework to use.

Note that I tried in-kernel per-CPU buffer or per-CPU
page approaches to clean this up further; however, a
noticeable performance regression (about 2% for
sequential read) was observed.

Let's leave it as-is for now.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 fs/erofs/Kconfig    | 14 ++++++++++++++
 fs/erofs/internal.h | 21 +++++++++++++++++++++
 fs/erofs/utils.c    | 12 ++++++++++++
 3 files changed, 47 insertions(+)

diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index a475fbebb831..5f8787c0cf89 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -81,3 +81,17 @@ config EROFS_FS_ZIP
 
 	  If you don't want to enable compression feature, say N.
 
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+	int "EROFS Cluster Pages Hard Limit"
+	depends on EROFS_FS_ZIP
+	range 1 256
+	default "1"
+	help
+	  Indicates maximum # of pages of a compressed
+	  physical cluster.
+
+	  For example, if files in a image were compressed
+	  into 8k-unit, hard limit should not be configured
+	  less than 2. Otherwise, the image will be refused
+	  to mount on this kernel.
+
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 6f0384c3f07a..19694886dda9 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -223,6 +223,12 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 	return v;
 }
 #endif /* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES		0
 #endif /* !CONFIG_EROFS_FS_ZIP */
 
 /* we strictly follow PAGE_SIZE and no buffer head yet */
@@ -483,6 +489,21 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 extern const struct file_operations erofs_dir_fops;
 
 /* utils.c */
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+void *erofs_get_pcpubuf(unsigned int pagenr);
+#define erofs_put_pcpubuf(buf) do { \
+	(void)&(buf);	\
+	preempt_enable();	\
+} while (0)
+#else
+static inline void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+	return ERR_PTR(-ENOTSUPP);
+}
+
+#define erofs_put_pcpubuf(buf) do {} while (0)
+#endif
+
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 628178261056..f3eed9af24d6 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -9,6 +9,18 @@
 #include "internal.h"
 #include <linux/pagevec.h>
 
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+static struct {
+	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
+} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
+
+void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+	preempt_disable();
+	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
+}
+#endif
+
 #ifdef CONFIG_EROFS_FS_ZIP
 /* global shrink count (for all mounted EROFS instances) */
 static atomic_long_t erofs_global_shrink_cnt;
-- 
2.17.1
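
For context, here is a minimal usage sketch of the API added above, assuming a
hypothetical caller. Only erofs_get_pcpubuf()/erofs_put_pcpubuf() and
Z_EROFS_CLUSTER_MAX_PAGES come from this patch; the function name
z_erofs_pcpubuf_copy_sketch(), the copy loop, and the elided decompression
step are illustrative assumptions, not part of the series:

/*
 * Usage sketch only (not part of this patch): gather one compressed
 * cluster into the per-CPU buffer while preemption is disabled.
 * The caller and the later decompression step are hypothetical.
 */
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include "internal.h"

static int z_erofs_pcpubuf_copy_sketch(struct page **compressed_pages,
				       unsigned int nrpages_in)
{
	void *dst;
	unsigned int i;

	if (nrpages_in > Z_EROFS_CLUSTER_MAX_PAGES)
		return -EOPNOTSUPP;

	/* disables preemption so the buffer stays bound to this CPU */
	dst = erofs_get_pcpubuf(0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* collect the compressed pages into the contiguous buffer */
	for (i = 0; i < nrpages_in; ++i) {
		void *src = kmap_atomic(compressed_pages[i]);

		memcpy(dst + i * PAGE_SIZE, src, PAGE_SIZE);
		kunmap_atomic(src);
	}

	/* ... feed dst to an in-place decompressor here ... */

	/* re-enables preemption; dst must not be touched afterwards */
	erofs_put_pcpubuf(dst);
	return 0;
}

Since erofs_get_pcpubuf() only disables preemption, the buffer cannot be held
across anything that may sleep; erofs_put_pcpubuf() simply re-enables
preemption, which is why it is a trivial macro.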