From mboxrd@z Thu Jan 1 00:00:00 1970
From: Mike Rapoport
Date: Sun, 02 Aug 2020 16:35:55 +0000
Subject: [PATCH v2 11/17] arch, mm: replace for_each_memblock() with for_each_mem_pfn_range()
Message-Id: <20200802163601.8189-12-rppt@kernel.org>
References: <20200802163601.8189-1-rppt@kernel.org>
In-Reply-To: <20200802163601.8189-1-rppt@kernel.org>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: Andrew Morton
Cc: Andy Lutomirski, Baoquan He, Benjamin Herrenschmidt, Borislav Petkov,
    Catalin Marinas, Christoph Hellwig, Dave Hansen, Emil Renner Berthing,
    Ingo Molnar, Hari Bathini, Marek Szyprowski, Max Filippov,
    Michael Ellerman, Michal Simek, Mike Rapoport, Mike Rapoport,
    Palmer Dabbelt, Paul Mackerras, Paul Walmsley, Peter Zijlstra,
    Russell King, Stafford Horne, Thomas Gleixner, Will Deacon,
    Yoshinori Sato, clang-built-linux@googlegroups.com,
    iommu@lists.linux-foundation.org, linux-arch@vger.kernel.org,
    linux-arm-kernel@lists.infradead.org, linux-c6x-dev@linux-c6x.org,
    linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org,
    linux-mm@kvack.org, linux-riscv@lists.infradead.org,
    linux-s390@vger.kernel.org, linux-sh@vger.kernel.org,
    linux-xtensa@linux-xtensa.org, linuxppc-dev@lists.ozlabs.org,
    openrisc@lists.librecores.org, sparclinux@vger.kernel.org,
    uclinux-h8-devel@lists.sourceforge.jp, x86@kernel.org

From: Mike Rapoport

There are several occurrences of the following pattern:

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		/* do something with start_pfn and end_pfn */
	}

Rather than iterate over all memblock.memory regions and query their start
and end PFNs on each iteration, use the for_each_mem_pfn_range() iterator
to get simpler and clearer code.
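
For comparison, the converted loop has the following shape; this is an
illustrative sketch distilled from the hunks below (the trailing NULL is
the optional node-id output pointer, which these call sites do not need):

	int i;
	unsigned long start_pfn, end_pfn;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		/* do something with start_pfn and end_pfn */
	}

Call sites that do want the node id pass a pointer instead of NULL, as the
mm/sparse.c conversion below does with &nid.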

Signed-off-by: Mike Rapoport
---
 arch/arm/mm/init.c           | 11 ++++-------
 arch/arm64/mm/init.c         | 11 ++++-------
 arch/powerpc/kernel/fadump.c | 11 ++++++-----
 arch/powerpc/mm/mem.c        | 15 ++++++++-------
 arch/powerpc/mm/numa.c       |  7 ++-----
 arch/s390/mm/page-states.c   |  6 ++----
 arch/sh/mm/init.c            |  9 +++------
 mm/memblock.c                |  6 ++----
 mm/sparse.c                  | 10 ++++------
 9 files changed, 35 insertions(+), 51 deletions(-)

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 626af348eb8f..d630573277d1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -304,16 +304,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
  */
 static void __init free_unused_memmap(void)
 {
-	unsigned long start, prev_end = 0;
-	struct memblock_region *reg;
+	unsigned long start, end, prev_end = 0;
+	int i;
 
 	/*
 	 * This relies on each bank being in address order.
 	 * The banks are sorted previously in bootmem_init().
 	 */
-	for_each_memblock(memory, reg) {
-		start = memblock_region_memory_base_pfn(reg);
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 #ifdef CONFIG_SPARSEMEM
 		/*
 		 * Take care not to free memmap entries that don't exist
@@ -341,8 +339,7 @@ static void __init free_unused_memmap(void)
 		 * memmap entries are valid from the bank end aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
-				 MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
 	}
 
 #ifdef CONFIG_SPARSEMEM
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc7c47a..291b5805457d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -473,12 +473,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
  */
 static void __init free_unused_memmap(void)
 {
-	unsigned long start, prev_end = 0;
-	struct memblock_region *reg;
-
-	for_each_memblock(memory, reg) {
-		start = __phys_to_pfn(reg->base);
+	unsigned long start, end, prev_end = 0;
+	int i;
 
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 #ifdef CONFIG_SPARSEMEM
 		/*
 		 * Take care not to free memmap entries that don't exist due
@@ -498,8 +496,7 @@ static void __init free_unused_memmap(void)
 		 * memmap entries are valid from the bank end aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
-				 MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
 	}
 
 #ifdef CONFIG_SPARSEMEM
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 78ab9a6ee6ac..fc85cbc66839 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1216,14 +1216,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn,
  */
 static void fadump_release_reserved_area(u64 start, u64 end)
 {
-	u64 tstart, tend, spfn, epfn;
-	struct memblock_region *reg;
+	u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i;
 
 	spfn = PHYS_PFN(start);
 	epfn = PHYS_PFN(end);
-	for_each_memblock(memory, reg) {
-		tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg));
-		tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg));
+
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
+		tstart = max_t(u64, spfn, reg_spfn);
+		tend = min_t(u64, epfn, reg_epfn);
+
 		if (tstart < tend) {
 			fadump_free_reserved_memory(tstart, tend);
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c2c11eb8dcfc..1364dd532107 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -192,15 +192,16 @@ void __init initmem_init(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	struct memblock_region *reg, *prev = NULL;
+	unsigned long spfn, epfn, prev = 0;
+	int i;
 
-	for_each_memblock(memory, reg) {
-		if (prev &&
-		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
-			register_nosave_region(memblock_region_memory_end_pfn(prev),
-					       memblock_region_memory_base_pfn(reg));
-		prev = reg;
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
+		if (prev && prev < spfn)
+			register_nosave_region(prev, spfn);
+
+		prev = epfn;
 	}
+
 	return 0;
 }
 #else /* CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9fcf2d195830..bae2d9edd52c 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -800,17 +800,14 @@ static void __init setup_nonnuma(void)
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int nid = 0;
-	struct memblock_region *reg;
+	int i;
 
 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for_each_memblock(memory, reg) {
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
 		fake_numa_create_new_node(end_pfn, &nid);
 		memblock_set_node(PFN_PHYS(start_pfn),
 				  PFN_PHYS(end_pfn - start_pfn),
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index fc141893d028..567c69f3069e 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -183,9 +183,9 @@ static void mark_kernel_pgd(void)
 
 void __init cmma_init_nodat(void)
 {
-	struct memblock_region *reg;
 	struct page *page;
 	unsigned long start, end, ix;
+	int i;
 
 	if (cmma_flag < 2)
 		return;
@@ -193,9 +193,7 @@ void __init cmma_init_nodat(void)
 	mark_kernel_pgd();
 
 	/* Set all kernel pages not used for page tables to stable/no-dat */
-	for_each_memblock(memory, reg) {
-		start = memblock_region_memory_base_pfn(reg);
-		end = memblock_region_memory_end_pfn(reg);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 		page = pfn_to_page(start);
 		for (ix = start; ix < end; ix++, page++) {
 			if (__test_and_clear_bit(PG_arch_1, &page->flags))
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 62b8f03ffc80..586ea500dcc7 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -224,15 +224,12 @@ void __init allocate_pgdat(unsigned int nid)
 
 static void __init do_init_bootmem(void)
 {
-	struct memblock_region *reg;
+	unsigned long start_pfn, end_pfn;
+	int i;
 
 	/* Add active regions with valid PFNs. */
-	for_each_memblock(memory, reg) {
-		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
 		__add_active_range(0, start_pfn, end_pfn);
-	}
 
 	/* All of system RAM sits in node 0 for the non-NUMA case */
 	allocate_pgdat(0);
diff --git a/mm/memblock.c b/mm/memblock.c
index 824938849f6d..c1a4c8798973 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1659,12 +1659,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
 phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
 {
 	unsigned long pages = 0;
-	struct memblock_region *r;
 	unsigned long start_pfn, end_pfn;
+	int i;
 
-	for_each_memblock(memory, r) {
-		start_pfn = memblock_region_memory_base_pfn(r);
-		end_pfn = memblock_region_memory_end_pfn(r);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
 		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
 		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
 		pages += end_pfn - start_pfn;
diff --git a/mm/sparse.c b/mm/sparse.c
index b2b9a3e34696..c2ba412a3141 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -292,13 +292,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
  */
 void __init memblocks_present(void)
 {
-	struct memblock_region *reg;
+	unsigned long start, end;
+	int i, nid;
 
-	for_each_memblock(memory, reg) {
-		memory_present(memblock_get_region_node(reg),
-			       memblock_region_memory_base_pfn(reg),
-			       memblock_region_memory_end_pfn(reg));
-	}
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
+		memory_present(nid, start, end);
 }
 
 /*
-- 
2.26.2
aquarius.haifa.ibm.com (nesher1.haifa.il.ibm.com [195.110.40.7]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id A660620738; Sun, 2 Aug 2020 16:38:05 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1596386296; bh=ZSm6d6pynDsYaspcmFkA9i9QoTs5yDO6OGxrC7T3jZs=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=wlz0jLSt/yYcm77PhcOHueJKHmXGUdcAwt5zWgDfghUZQqofxnmBbVGi+oCHfmwIN SvA7WmPU2Aaoo8BT9DtKHLs4vlJxytzaJyGC2Et8AJJq3m2NzkXqZUhldg9blFC8ZG +G9gvYl9Rq/waJcSbTeR3DYHP02mbGksAohjl2as= From: Mike Rapoport To: Andrew Morton Cc: Andy Lutomirski , Baoquan He , Benjamin Herrenschmidt , Borislav Petkov , Catalin Marinas , Christoph Hellwig , Dave Hansen , Emil Renner Berthing , Ingo Molnar , Hari Bathini , Marek Szyprowski , Max Filippov , Michael Ellerman , Michal Simek , Mike Rapoport , Mike Rapoport , Palmer Dabbelt , Paul Mackerras , Paul Walmsley , Peter Zijlstra , Russell King , Stafford Horne , Thomas Gleixner , Will Deacon , Yoshinori Sato , clang-built-linux@googlegroups.com, iommu@lists.linux-foundation.org, linux-arch@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-c6x-dev@linux-c6x.org, linux-kernel@vger.kernel.org, linux-mips@vger.kernel.org, linux-mm@kvack.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, linux-sh@vger.kernel.org, linux-xtensa@linux-xtensa.org, linuxppc-dev@lists.ozlabs.org, openrisc@lists.librecores.org, sparclinux@vger.kernel.org, uclinux-h8-devel@lists.sourceforge.jp, x86@kernel.org Subject: [PATCH v2 11/17] arch, mm: replace for_each_memblock() with for_each_mem_pfn_range() Date: Sun, 2 Aug 2020 19:35:55 +0300 Message-Id: <20200802163601.8189-12-rppt@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: <20200802163601.8189-1-rppt@kernel.org> References: <20200802163601.8189-1-rppt@kernel.org> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org From: Mike Rapoport There are several occurrences of the following pattern: for_each_memblock(memory, reg) { start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); /* do something with start_pfn and end_pfn */ } Rather than iterate over all memblock.memory regions and each time query for their start and end PFNs, use for_each_mem_pfn_range() iterator to get simpler and clearer code. Signed-off-by: Mike Rapoport --- arch/arm/mm/init.c | 11 ++++------- arch/arm64/mm/init.c | 11 ++++------- arch/powerpc/kernel/fadump.c | 11 ++++++----- arch/powerpc/mm/mem.c | 15 ++++++++------- arch/powerpc/mm/numa.c | 7 ++----- arch/s390/mm/page-states.c | 6 ++---- arch/sh/mm/init.c | 9 +++------ mm/memblock.c | 6 ++---- mm/sparse.c | 10 ++++------ 9 files changed, 35 insertions(+), 51 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 626af348eb8f..d630573277d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -304,16 +304,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned long start, end, prev_end = 0; + int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). 
*/ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist @@ -341,8 +339,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(memblock_region_memory_end_pfn(reg), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e93cfc7c47a..291b5805457d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -473,12 +473,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); + unsigned long start, end, prev_end = 0; + int i; + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist due @@ -498,8 +496,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 78ab9a6ee6ac..fc85cbc66839 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1216,14 +1216,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { - u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..1364dd532107 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -192,15 +192,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; + unsigned long spfn, epfn, prev = 0; + int i; - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..bae2d9edd52c 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -800,17 +800,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; 
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc141893d028..567c69f3069e 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) void __init cmma_init_nodat(void) { - struct memblock_region *reg; struct page *page; unsigned long start, end, ix; + int i; if (cmma_flag < 2) return; @@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) mark_kernel_pgd(); /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - end = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { page = pfn_to_page(start); for (ix = start; ix < end; ix++, page++) { if (__test_and_clear_bit(PG_arch_1, &page->flags)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 62b8f03ffc80..586ea500dcc7 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -224,15 +224,12 @@ void __init allocate_pgdat(unsigned int nid) static void __init do_init_bootmem(void) { - struct memblock_region *reg; + unsigned long start_pfn, end_pfn; + int i; /* Add active regions with valid PFNs. */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) __add_active_range(0, start_pfn, end_pfn); - } /* All of system RAM sits in node 0 for the non-NUMA case */ allocate_pgdat(0); diff --git a/mm/memblock.c b/mm/memblock.c index 824938849f6d..c1a4c8798973 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1659,12 +1659,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void) phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) { unsigned long pages = 0; - struct memblock_region *r; unsigned long start_pfn, end_pfn; + int i; - for_each_memblock(memory, r) { - start_pfn = memblock_region_memory_base_pfn(r); - end_pfn = memblock_region_memory_end_pfn(r); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { start_pfn = min_t(unsigned long, start_pfn, limit_pfn); end_pfn = min_t(unsigned long, end_pfn, limit_pfn); pages += end_pfn - start_pfn; diff --git a/mm/sparse.c b/mm/sparse.c index b2b9a3e34696..c2ba412a3141 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -292,13 +292,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) */ void __init memblocks_present(void) { - struct memblock_region *reg; + unsigned long start, end; + int i, nid; - for_each_memblock(memory, reg) { - memory_present(memblock_get_region_node(reg), - memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) + memory_present(nid, start, end); } /* -- 2.26.2 From mboxrd@z Thu Jan 1 00:00:00 1970 From: Mike Rapoport Subject: [PATCH v2 11/17] arch, mm: replace for_each_memblock() with for_each_mem_pfn_range() Date: Sun, 2 Aug 2020 19:35:55 
+0300 Message-ID: <20200802163601.8189-12-rppt@kernel.org> References: <20200802163601.8189-1-rppt@kernel.org> Mime-Version: 1.0 Content-Transfer-Encoding: 8bit Return-path: In-Reply-To: <20200802163601.8189-1-rppt@kernel.org> Sender: linux-mips-owner@vger.kernel.org To: Andrew Morton Cc: Andy Lutomirski , Baoquan He , Benjamin Herrenschmidt , Borislav Petkov , Catalin Marinas , Christoph Hellwig , Dave Hansen , Emil Renner Berthing , Ingo Molnar , Hari Bathini , Marek Szyprowski , Max Filippov , Michael Ellerman , Michal Simek , Mike Rapoport , Mike Rapoport , Palmer Dabbelt , Paul Mackerras , Paul Walmsley , Peter Zijlstra List-Id: linux-arch.vger.kernel.org From: Mike Rapoport There are several occurrences of the following pattern: for_each_memblock(memory, reg) { start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); /* do something with start_pfn and end_pfn */ } Rather than iterate over all memblock.memory regions and each time query for their start and end PFNs, use for_each_mem_pfn_range() iterator to get simpler and clearer code. Signed-off-by: Mike Rapoport --- arch/arm/mm/init.c | 11 ++++------- arch/arm64/mm/init.c | 11 ++++------- arch/powerpc/kernel/fadump.c | 11 ++++++----- arch/powerpc/mm/mem.c | 15 ++++++++------- arch/powerpc/mm/numa.c | 7 ++----- arch/s390/mm/page-states.c | 6 ++---- arch/sh/mm/init.c | 9 +++------ mm/memblock.c | 6 ++---- mm/sparse.c | 10 ++++------ 9 files changed, 35 insertions(+), 51 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 626af348eb8f..d630573277d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -304,16 +304,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned long start, end, prev_end = 0; + int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist @@ -341,8 +339,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(memblock_region_memory_end_pfn(reg), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e93cfc7c47a..291b5805457d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -473,12 +473,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); + unsigned long start, end, prev_end = 0; + int i; + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist due @@ -498,8 +496,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. 
*/ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 78ab9a6ee6ac..fc85cbc66839 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1216,14 +1216,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { - u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..1364dd532107 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -192,15 +192,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; + unsigned long spfn, epfn, prev = 0; + int i; - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..bae2d9edd52c 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -800,17 +800,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc141893d028..567c69f3069e 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) void __init cmma_init_nodat(void) { - struct memblock_region *reg; struct page *page; unsigned long start, end, ix; + int i; if (cmma_flag < 2) return; @@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) mark_kernel_pgd(); /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - end = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { page = pfn_to_page(start); for (ix = start; ix < end; ix++, page++) { if (__test_and_clear_bit(PG_arch_1, 
&page->flags)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 62b8f03ffc80..586ea500dcc7 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -224,15 +224,12 @@ void __init allocate_pgdat(unsigned int nid) static void __init do_init_bootmem(void) { - struct memblock_region *reg; + unsigned long start_pfn, end_pfn; + int i; /* Add active regions with valid PFNs. */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) __add_active_range(0, start_pfn, end_pfn); - } /* All of system RAM sits in node 0 for the non-NUMA case */ allocate_pgdat(0); diff --git a/mm/memblock.c b/mm/memblock.c index 824938849f6d..c1a4c8798973 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1659,12 +1659,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void) phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) { unsigned long pages = 0; - struct memblock_region *r; unsigned long start_pfn, end_pfn; + int i; - for_each_memblock(memory, r) { - start_pfn = memblock_region_memory_base_pfn(r); - end_pfn = memblock_region_memory_end_pfn(r); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { start_pfn = min_t(unsigned long, start_pfn, limit_pfn); end_pfn = min_t(unsigned long, end_pfn, limit_pfn); pages += end_pfn - start_pfn; diff --git a/mm/sparse.c b/mm/sparse.c index b2b9a3e34696..c2ba412a3141 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -292,13 +292,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) */ void __init memblocks_present(void) { - struct memblock_region *reg; + unsigned long start, end; + int i, nid; - for_each_memblock(memory, reg) { - memory_present(memblock_get_region_node(reg), - memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) + memory_present(nid, start, end); } /* -- 2.26.2 From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-13.0 required=3.0 tests=BAYES_00,DKIMWL_WL_HIGH, DKIM_SIGNED,DKIM_VALID,INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,USER_AGENT_GIT autolearn=unavailable autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id D1752C433DF for ; Sun, 2 Aug 2020 16:39:27 +0000 (UTC) Received: from merlin.infradead.org (merlin.infradead.org [205.233.59.134]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 8A4A420829 for ; Sun, 2 Aug 2020 16:39:27 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=pass (2048-bit key) header.d=lists.infradead.org header.i=@lists.infradead.org header.b="Lf5vDalM"; dkim=fail reason="signature verification failed" (1024-bit key) header.d=kernel.org header.i=@kernel.org header.b="wlz0jLSt" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 8A4A420829 Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=kernel.org Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=linux-riscv-bounces+linux-riscv=archiver.kernel.org@lists.infradead.org DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; 
d=lists.infradead.org; s=merlin.20170209; h=Sender:Content-Transfer-Encoding: Content-Type:Cc:List-Subscribe:List-Help:List-Post:List-Archive: List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:Message-Id:Date: Subject:To:From:Reply-To:Content-ID:Content-Description:Resent-Date: Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:List-Owner; bh=IY67JmryzE0zuzNHdTe2vQ5nBt4xpfzb+kpUk5qmEsQ=; b=Lf5vDalM24PUt0J7FJztAgAGr UDd83Nx5NJvC3adTkFMv9KM6Cjbzu6h9v0UVTDaQawZI+V36StZ6OPFgN7vhXFLkpHmvRuhXc9KC7 0oQhBwf14qbdnX59HfxdLwmOKJoPIRrvNbcQRhdjVaIewut07wKNsVg2EiuUBB27REBkSgx0dcn9p MItoHQQPCQ/nFkZUrLNKFUTofUYyDH9JbOuxqJr9st7fNbKl2A5AzM/Wc6GaBaRAGvo+K4jBFwdS7 3VZikvjo99p8Mvs3i1VCOKotQGtduhJ/SBeFW0lhONt20x0YXr039hbSjjtRvQN3d58d5zim+WneH mX2Qa0TYg==; Received: from localhost ([::1] helo=merlin.infradead.org) by merlin.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux)) id 1k2H0r-0008W5-HB; Sun, 02 Aug 2020 16:39:05 +0000 Received: from mail.kernel.org ([198.145.29.99]) by merlin.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux)) id 1k2H04-0007xx-NG; Sun, 02 Aug 2020 16:38:18 +0000 Received: from aquarius.haifa.ibm.com (nesher1.haifa.il.ibm.com [195.110.40.7]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id A660620738; Sun, 2 Aug 2020 16:38:05 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1596386296; bh=ZSm6d6pynDsYaspcmFkA9i9QoTs5yDO6OGxrC7T3jZs=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=wlz0jLSt/yYcm77PhcOHueJKHmXGUdcAwt5zWgDfghUZQqofxnmBbVGi+oCHfmwIN SvA7WmPU2Aaoo8BT9DtKHLs4vlJxytzaJyGC2Et8AJJq3m2NzkXqZUhldg9blFC8ZG +G9gvYl9Rq/waJcSbTeR3DYHP02mbGksAohjl2as= From: Mike Rapoport To: Andrew Morton Subject: [PATCH v2 11/17] arch, mm: replace for_each_memblock() with for_each_mem_pfn_range() Date: Sun, 2 Aug 2020 19:35:55 +0300 Message-Id: <20200802163601.8189-12-rppt@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: <20200802163601.8189-1-rppt@kernel.org> References: <20200802163601.8189-1-rppt@kernel.org> MIME-Version: 1.0 X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 X-CRM114-CacheID: sfid-20200802_123817_009681_C837663B X-CRM114-Status: GOOD ( 20.97 ) X-BeenThere: linux-riscv@lists.infradead.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Thomas Gleixner , Emil Renner Berthing , linux-sh@vger.kernel.org, Peter Zijlstra , Benjamin Herrenschmidt , Dave Hansen , linux-mips@vger.kernel.org, Max Filippov , Paul Mackerras , sparclinux@vger.kernel.org, linux-riscv@lists.infradead.org, Will Deacon , Christoph Hellwig , Marek Szyprowski , linux-arch@vger.kernel.org, linux-s390@vger.kernel.org, linux-c6x-dev@linux-c6x.org, Baoquan He , Michael Ellerman , x86@kernel.org, Russell King , Mike Rapoport , clang-built-linux@googlegroups.com, Ingo Molnar , linux-arm-kernel@lists.infradead.org, Catalin Marinas , uclinux-h8-devel@lists.sourceforge.jp, linux-xtensa@linux-xtensa.org, openrisc@lists.librecores.org, Borislav Petkov , Andy Lutomirski , Paul Walmsley , Stafford Horne , Hari Bathini , Michal Simek , Yoshinori Sato , linux-mm@kvack.org, linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org, Palmer Dabbelt , linuxppc-dev@lists.ozlabs.org, Mike Rapoport Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Sender: "linux-riscv" Errors-To: 
linux-riscv-bounces+linux-riscv=archiver.kernel.org@lists.infradead.org From: Mike Rapoport There are several occurrences of the following pattern: for_each_memblock(memory, reg) { start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); /* do something with start_pfn and end_pfn */ } Rather than iterate over all memblock.memory regions and each time query for their start and end PFNs, use for_each_mem_pfn_range() iterator to get simpler and clearer code. Signed-off-by: Mike Rapoport --- arch/arm/mm/init.c | 11 ++++------- arch/arm64/mm/init.c | 11 ++++------- arch/powerpc/kernel/fadump.c | 11 ++++++----- arch/powerpc/mm/mem.c | 15 ++++++++------- arch/powerpc/mm/numa.c | 7 ++----- arch/s390/mm/page-states.c | 6 ++---- arch/sh/mm/init.c | 9 +++------ mm/memblock.c | 6 ++---- mm/sparse.c | 10 ++++------ 9 files changed, 35 insertions(+), 51 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 626af348eb8f..d630573277d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -304,16 +304,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned long start, end, prev_end = 0; + int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist @@ -341,8 +339,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(memblock_region_memory_end_pfn(reg), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e93cfc7c47a..291b5805457d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -473,12 +473,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); + unsigned long start, end, prev_end = 0; + int i; + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist due @@ -498,8 +496,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. 
*/ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 78ab9a6ee6ac..fc85cbc66839 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1216,14 +1216,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { - u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..1364dd532107 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -192,15 +192,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; + unsigned long spfn, epfn, prev = 0; + int i; - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..bae2d9edd52c 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -800,17 +800,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc141893d028..567c69f3069e 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) void __init cmma_init_nodat(void) { - struct memblock_region *reg; struct page *page; unsigned long start, end, ix; + int i; if (cmma_flag < 2) return; @@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) mark_kernel_pgd(); /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - end = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { page = pfn_to_page(start); for (ix = start; ix < end; ix++, page++) { if (__test_and_clear_bit(PG_arch_1, 
&page->flags)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 62b8f03ffc80..586ea500dcc7 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -224,15 +224,12 @@ void __init allocate_pgdat(unsigned int nid) static void __init do_init_bootmem(void) { - struct memblock_region *reg; + unsigned long start_pfn, end_pfn; + int i; /* Add active regions with valid PFNs. */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) __add_active_range(0, start_pfn, end_pfn); - } /* All of system RAM sits in node 0 for the non-NUMA case */ allocate_pgdat(0); diff --git a/mm/memblock.c b/mm/memblock.c index 824938849f6d..c1a4c8798973 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1659,12 +1659,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void) phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) { unsigned long pages = 0; - struct memblock_region *r; unsigned long start_pfn, end_pfn; + int i; - for_each_memblock(memory, r) { - start_pfn = memblock_region_memory_base_pfn(r); - end_pfn = memblock_region_memory_end_pfn(r); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { start_pfn = min_t(unsigned long, start_pfn, limit_pfn); end_pfn = min_t(unsigned long, end_pfn, limit_pfn); pages += end_pfn - start_pfn; diff --git a/mm/sparse.c b/mm/sparse.c index b2b9a3e34696..c2ba412a3141 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -292,13 +292,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) */ void __init memblocks_present(void) { - struct memblock_region *reg; + unsigned long start, end; + int i, nid; - for_each_memblock(memory, reg) { - memory_present(memblock_get_region_node(reg), - memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) + memory_present(nid, start, end); } /* -- 2.26.2 _______________________________________________ linux-riscv mailing list linux-riscv@lists.infradead.org http://lists.infradead.org/mailman/listinfo/linux-riscv From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-12.8 required=3.0 tests=BAYES_00,DKIM_INVALID, DKIM_SIGNED,INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY,SPF_HELO_NONE, SPF_PASS,USER_AGENT_GIT autolearn=unavailable autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1B3E0C433DF for ; Sun, 2 Aug 2020 17:01:46 +0000 (UTC) Received: from lists.ozlabs.org (lists.ozlabs.org [203.11.71.2]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id BD996206E7 for ; Sun, 2 Aug 2020 17:01:45 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=fail reason="signature verification failed" (1024-bit key) header.d=kernel.org header.i=@kernel.org header.b="wlz0jLSt" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org BD996206E7 Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=kernel.org Authentication-Results: mail.kernel.org; spf=pass smtp.mailfrom=linuxppc-dev-bounces+linuxppc-dev=archiver.kernel.org@lists.ozlabs.org Received: from bilbo.ozlabs.org 
(lists.ozlabs.org [IPv6:2401:3900:2:1::3]) by lists.ozlabs.org (Postfix) with ESMTP id 4BKS3W5lTdzDqNx for ; Mon, 3 Aug 2020 03:01:43 +1000 (AEST) Authentication-Results: lists.ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=kernel.org (client-ip=198.145.29.99; helo=mail.kernel.org; envelope-from=rppt@kernel.org; receiver=) Authentication-Results: lists.ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: lists.ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=wlz0jLSt; dkim-atps=neutral Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by lists.ozlabs.org (Postfix) with ESMTPS id 4BKRXV3WWkzDqQM for ; Mon, 3 Aug 2020 02:38:18 +1000 (AEST) Received: from aquarius.haifa.ibm.com (nesher1.haifa.il.ibm.com [195.110.40.7]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id A660620738; Sun, 2 Aug 2020 16:38:05 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1596386296; bh=ZSm6d6pynDsYaspcmFkA9i9QoTs5yDO6OGxrC7T3jZs=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=wlz0jLSt/yYcm77PhcOHueJKHmXGUdcAwt5zWgDfghUZQqofxnmBbVGi+oCHfmwIN SvA7WmPU2Aaoo8BT9DtKHLs4vlJxytzaJyGC2Et8AJJq3m2NzkXqZUhldg9blFC8ZG +G9gvYl9Rq/waJcSbTeR3DYHP02mbGksAohjl2as= From: Mike Rapoport To: Andrew Morton Subject: [PATCH v2 11/17] arch, mm: replace for_each_memblock() with for_each_mem_pfn_range() Date: Sun, 2 Aug 2020 19:35:55 +0300 Message-Id: <20200802163601.8189-12-rppt@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: <20200802163601.8189-1-rppt@kernel.org> References: <20200802163601.8189-1-rppt@kernel.org> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-BeenThere: linuxppc-dev@lists.ozlabs.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Linux on PowerPC Developers Mail List List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Thomas Gleixner , Emil Renner Berthing , linux-sh@vger.kernel.org, Peter Zijlstra , Dave Hansen , linux-mips@vger.kernel.org, Max Filippov , Paul Mackerras , sparclinux@vger.kernel.org, linux-riscv@lists.infradead.org, Will Deacon , Christoph Hellwig , Marek Szyprowski , linux-arch@vger.kernel.org, linux-s390@vger.kernel.org, linux-c6x-dev@linux-c6x.org, Baoquan He , x86@kernel.org, Russell King , Mike Rapoport , clang-built-linux@googlegroups.com, Ingo Molnar , linux-arm-kernel@lists.infradead.org, Catalin Marinas , uclinux-h8-devel@lists.sourceforge.jp, linux-xtensa@linux-xtensa.org, openrisc@lists.librecores.org, Borislav Petkov , Andy Lutomirski , Paul Walmsley , Stafford Horne , Hari Bathini , Michal Simek , Yoshinori Sato , linux-mm@kvack.org, linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org, Palmer Dabbelt , linuxppc-dev@lists.ozlabs.org, Mike Rapoport Errors-To: linuxppc-dev-bounces+linuxppc-dev=archiver.kernel.org@lists.ozlabs.org Sender: "Linuxppc-dev" From: Mike Rapoport There are several occurrences of the following pattern: for_each_memblock(memory, reg) { start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); /* do something with start_pfn and end_pfn */ } Rather than iterate over all memblock.memory regions and each time query for their start and end PFNs, use for_each_mem_pfn_range() iterator 
to get simpler and clearer code. Signed-off-by: Mike Rapoport --- arch/arm/mm/init.c | 11 ++++------- arch/arm64/mm/init.c | 11 ++++------- arch/powerpc/kernel/fadump.c | 11 ++++++----- arch/powerpc/mm/mem.c | 15 ++++++++------- arch/powerpc/mm/numa.c | 7 ++----- arch/s390/mm/page-states.c | 6 ++---- arch/sh/mm/init.c | 9 +++------ mm/memblock.c | 6 ++---- mm/sparse.c | 10 ++++------ 9 files changed, 35 insertions(+), 51 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 626af348eb8f..d630573277d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -304,16 +304,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned long start, end, prev_end = 0; + int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist @@ -341,8 +339,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(memblock_region_memory_end_pfn(reg), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e93cfc7c47a..291b5805457d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -473,12 +473,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); + unsigned long start, end, prev_end = 0; + int i; + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist due @@ -498,8 +496,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. 
*/ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 78ab9a6ee6ac..fc85cbc66839 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1216,14 +1216,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { - u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..1364dd532107 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -192,15 +192,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; + unsigned long spfn, epfn, prev = 0; + int i; - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..bae2d9edd52c 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -800,17 +800,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc141893d028..567c69f3069e 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) void __init cmma_init_nodat(void) { - struct memblock_region *reg; struct page *page; unsigned long start, end, ix; + int i; if (cmma_flag < 2) return; @@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) mark_kernel_pgd(); /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - end = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { page = pfn_to_page(start); for (ix = start; ix < end; ix++, page++) { if (__test_and_clear_bit(PG_arch_1, 
&page->flags)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 62b8f03ffc80..586ea500dcc7 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -224,15 +224,12 @@ void __init allocate_pgdat(unsigned int nid) static void __init do_init_bootmem(void) { - struct memblock_region *reg; + unsigned long start_pfn, end_pfn; + int i; /* Add active regions with valid PFNs. */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) __add_active_range(0, start_pfn, end_pfn); - } /* All of system RAM sits in node 0 for the non-NUMA case */ allocate_pgdat(0); diff --git a/mm/memblock.c b/mm/memblock.c index 824938849f6d..c1a4c8798973 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1659,12 +1659,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void) phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) { unsigned long pages = 0; - struct memblock_region *r; unsigned long start_pfn, end_pfn; + int i; - for_each_memblock(memory, r) { - start_pfn = memblock_region_memory_base_pfn(r); - end_pfn = memblock_region_memory_end_pfn(r); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { start_pfn = min_t(unsigned long, start_pfn, limit_pfn); end_pfn = min_t(unsigned long, end_pfn, limit_pfn); pages += end_pfn - start_pfn; diff --git a/mm/sparse.c b/mm/sparse.c index b2b9a3e34696..c2ba412a3141 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -292,13 +292,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) */ void __init memblocks_present(void) { - struct memblock_region *reg; + unsigned long start, end; + int i, nid; - for_each_memblock(memory, reg) { - memory_present(memblock_get_region_node(reg), - memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) + memory_present(nid, start, end); } /* -- 2.26.2 From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-12.8 required=3.0 tests=BAYES_00,DKIM_INVALID, DKIM_SIGNED,INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY,SPF_HELO_NONE, SPF_PASS,USER_AGENT_GIT autolearn=unavailable autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 6E048C433E1 for ; Sun, 2 Aug 2020 16:38:19 +0000 (UTC) Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 4270820885 for ; Sun, 2 Aug 2020 16:38:19 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=fail reason="signature verification failed" (1024-bit key) header.d=kernel.org header.i=@kernel.org header.b="wlz0jLSt" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 4270820885 Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=kernel.org Authentication-Results: mail.kernel.org; spf=pass smtp.mailfrom=iommu-bounces@lists.linux-foundation.org Received: from localhost (localhost [127.0.0.1]) by hemlock.osuosl.org (Postfix) with ESMTP id 0B90C87DCE; Sun, 2 Aug 2020 16:38:19 +0000 (UTC) X-Virus-Scanned: amavisd-new at osuosl.org Received: from hemlock.osuosl.org 