From: Dennis Zhou <dennis@kernel.org>
To: David Sterba <dsterba@suse.com>, Chris Mason <clm@fb.com>,
	Josef Bacik <josef@toxicpanda.com>, Omar Sandoval <osandov@osandov.com>
Cc: kernel-team@fb.com, linux-btrfs@vger.kernel.org,
	Dennis Zhou <dennis@kernel.org>
Subject: [PATCH 01/22] bitmap: genericize percpu bitmap region iterators
Date: Wed, 20 Nov 2019 16:51:00 -0500
Message-ID: <c2fec8d29be07ac70bb85dbac5fa10c259fa21f9.1574282259.git.dennis@kernel.org>
In-Reply-To: <cover.1574282259.git.dennis@kernel.org>

Bitmaps are fairly popular for their space efficiency, but we don't
have generic iterators available. Make percpu's bitmap region
iterators available to everyone.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
---
 include/linux/bitmap.h | 35 ++++++++++++++++++++++++
 mm/percpu.c            | 61 +++++++++++-------------------------------
 2 files changed, 51 insertions(+), 45 deletions(-)

diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 29fc933df3bf..9c31b5268f7a 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -438,6 +438,41 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
 	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
 }
 
+static inline void bitmap_next_clear_region(unsigned long *bitmap,
+					    unsigned int *rs, unsigned int *re,
+					    unsigned int end)
+{
+	*rs = find_next_zero_bit(bitmap, end, *rs);
+	*re = find_next_bit(bitmap, end, *rs + 1);
+}
+
+static inline void bitmap_next_set_region(unsigned long *bitmap,
+					  unsigned int *rs, unsigned int *re,
+					  unsigned int end)
+{
+	*rs = find_next_bit(bitmap, end, *rs);
+	*re = find_next_zero_bit(bitmap, end, *rs + 1);
+}
+
+/*
+ * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
+ * @rs and @re should be integer variables and will be set to start and end
+ * index of the current clear or set region.
+ */
+#define bitmap_for_each_clear_region(bitmap, rs, re, start, end)	     \
+	for ((rs) = (start),						     \
+	     bitmap_next_clear_region((bitmap), &(rs), &(re), (end));	     \
+	     (rs) < (re);						     \
+	     (rs) = (re) + 1,						     \
+	     bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
+
+#define bitmap_for_each_set_region(bitmap, rs, re, start, end)		     \
+	for ((rs) = (start),						     \
+	     bitmap_next_set_region((bitmap), &(rs), &(re), (end));	     \
+	     (rs) < (re);						     \
+	     (rs) = (re) + 1,						     \
+	     bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
+
 /**
  * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
  * @n: u64 value
diff --git a/mm/percpu.c b/mm/percpu.c
index 7e06a1e58720..e9844086b236 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -270,33 +270,6 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 	       pcpu_unit_page_offset(cpu, page_idx);
 }
 
-static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
-{
-	*rs = find_next_zero_bit(bitmap, end, *rs);
-	*re = find_next_bit(bitmap, end, *rs + 1);
-}
-
-static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
-{
-	*rs = find_next_bit(bitmap, end, *rs);
-	*re = find_next_zero_bit(bitmap, end, *rs + 1);
-}
-
-/*
- * Bitmap region iterators. Iterates over the bitmap between
- * [@start, @end) in @chunk. @rs and @re should be integer variables
- * and will be set to start and end index of the current free region.
- */
-#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
-	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
-	     (rs) < (re);						     \
-	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
-
-#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
-	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
-	     (rs) < (re);						     \
-	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
-
 /*
  * The following are helper functions to help access bitmaps and convert
  * between bitmap offsets to address offsets.
@@ -732,9 +705,8 @@ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
 	}
 
 	bits = 0;
-	pcpu_for_each_md_free_region(chunk, bit_off, bits) {
+	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
-	}
 }
 
 /**
@@ -749,7 +721,7 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 {
 	struct pcpu_block_md *block = chunk->md_blocks + index;
 	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
-	int rs, re, start;	/* region start, region end */
+	unsigned int rs, re, start;	/* region start, region end */
 
 	/* promote scan_hint to contig_hint */
 	if (block->scan_hint) {
@@ -765,10 +737,9 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 	block->right_free = 0;
 
 	/* iterate over free areas and update the contig hints */
-	pcpu_for_each_unpop_region(alloc_map, rs, re, start,
-				   PCPU_BITMAP_BLOCK_BITS) {
+	bitmap_for_each_clear_region(alloc_map, rs, re, start,
+				     PCPU_BITMAP_BLOCK_BITS)
 		pcpu_block_update(block, rs, re);
-	}
 }
 
 /**
@@ -1041,13 +1012,13 @@ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
 			      int *next_off)
 {
-	int page_start, page_end, rs, re;
+	unsigned int page_start, page_end, rs, re;
 
 	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
 	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
 
 	rs = page_start;
-	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
+	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
 	if (rs >= page_end)
 		return true;
 
@@ -1702,13 +1673,13 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	/* populate if not all pages are already there */
 	if (!is_atomic) {
-		int page_start, page_end, rs, re;
+		unsigned int page_start, page_end, rs, re;
 
 		page_start = PFN_DOWN(off);
 		page_end = PFN_UP(off + size);
 
-		pcpu_for_each_unpop_region(chunk->populated, rs, re,
-					   page_start, page_end) {
+		bitmap_for_each_clear_region(chunk->populated, rs, re,
+					     page_start, page_end) {
 			WARN_ON(chunk->immutable);
 
 			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
@@ -1858,10 +1829,10 @@ static void pcpu_balance_workfn(struct work_struct *work)
 	spin_unlock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, &to_free, list) {
-		int rs, re;
+		unsigned int rs, re;
 
-		pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
-					 chunk->nr_pages) {
+		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
+					   chunk->nr_pages) {
 			pcpu_depopulate_chunk(chunk, rs, re);
 			spin_lock_irq(&pcpu_lock);
 			pcpu_chunk_depopulated(chunk, rs, re);
@@ -1893,7 +1864,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
 	}
 
 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
-		int nr_unpop = 0, rs, re;
+		unsigned int nr_unpop = 0, rs, re;
 
 		if (!nr_to_pop)
 			break;
@@ -1910,9 +1881,9 @@ static void pcpu_balance_workfn(struct work_struct *work)
 			continue;
 
 		/* @chunk can't go away while pcpu_alloc_mutex is held */
-		pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
-					   chunk->nr_pages) {
-			int nr = min(re - rs, nr_to_pop);
+		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
+					     chunk->nr_pages) {
+			int nr = min_t(int, re - rs, nr_to_pop);
 
 			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
 			if (!ret) {
--
2.17.1
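For readers new to these iterators, the sketch below shows how the two macros
added to include/linux/bitmap.h are intended to be driven. It is not part of
the patch: NR_PAGES, the populated bitmap, and dump_regions() are hypothetical
names invented for illustration, and only the macros introduced above plus
pr_info() are relied on.

/*
 * Usage sketch (illustrative only, not part of this patch): walk the set
 * and clear regions of a small bitmap using the iterators exported above.
 * NR_PAGES, populated, and dump_regions() are made-up names.
 */
#include <linux/bitmap.h>
#include <linux/printk.h>

#define NR_PAGES 64

static void dump_regions(unsigned long *populated)
{
	unsigned int rs, re;	/* region start / end; end is exclusive */

	/* each pass yields one maximal run of set bits in [0, NR_PAGES) */
	bitmap_for_each_set_region(populated, rs, re, 0, NR_PAGES)
		pr_info("populated   [%u, %u)\n", rs, re);

	/* and the complementary runs of clear bits */
	bitmap_for_each_clear_region(populated, rs, re, 0, NR_PAGES)
		pr_info("unpopulated [%u, %u)\n", rs, re);
}

Because each iteration hands back a half-open [rs, re) range, the conversion
in mm/percpu.c above is essentially a rename of the old pcpu_* iterators plus
switching the region variables from int to unsigned int.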