linux-arch.vger.kernel.org archive mirror
From: Baoquan He <bhe@redhat.com>
To: Mike Rapoport <rppt@kernel.org>
Cc: Rich Felker <dalias@libc.org>,
	linux-ia64@vger.kernel.org, linux-doc@vger.kernel.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Heiko Carstens <heiko.carstens@de.ibm.com>,
	Michal Hocko <mhocko@kernel.org>,
	"James E.J. Bottomley" <James.Bottomley@hansenpartnership.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	linux-csky@vger.kernel.org, linux-parisc@vger.kernel.org,
	sparclinux@vger.kernel.org, linux-hexagon@vger.kernel.org,
	linux-riscv@lists.infradead.org,
	Greg Ungerer <gerg@linux-m68k.org>,
	linux-arch@vger.kernel.org, linux-s390@vger.kernel.org,
	linux-snps-arc@lists.infradead.org, linux-c6x-dev@linux-c6x.org,
	Brian Cain <bcain@codeaurora.org>,
	Jonathan Corbet <corbet@lwn.net>,
	linux-sh@vger.kernel.org, Michael Ellerman <mpe@ellerman.id.au>,
	Helge Deller <deller@gmx.de>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	Ley Foon Tan <ley.foon.>
Subject: Re: [PATCH 04/21] mm: free_area_init: use maximal zone PFNs rather than zone sizes
Date: Thu, 23 Apr 2020 07:41:49 +0800
Message-ID: <20200422234149.GW4247@MiWiFi-R3L-srv>
In-Reply-To: <20200412194859.12663-5-rppt@kernel.org>

On 04/12/20 at 10:48pm, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
> 
> Currently, architectures that use free_area_init() to initialize the memory
> map and the node and zone structures need to calculate zone and hole sizes
> themselves. We can use free_area_init_nodes() instead and let it detect the
> zone boundaries; the architectures then only have to supply the maximal PFNs
> that each zone may span.
> 
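To make the new contract concrete, here is a minimal, stand-alone sketch of
what the core mm effectively derives from the per-zone maximal PFNs. The PFN
values, the reduced zone list and the clamping loop are illustrative
assumptions, not code from this patch; in the kernel the equivalent work is
done by free_area_init_nodes() against the memblock memory map.

	#include <stdio.h>

	enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

	int main(void)
	{
		/* What an architecture supplies after this patch: the
		 * maximal (exclusive) PFN each zone may reach.
		 * Values are made up for illustration. */
		unsigned long max_zone_pfn[MAX_NR_ZONES] = {
			[ZONE_DMA]    = 0x1000,   /* e.g. 16 MiB with 4 KiB pages */
			[ZONE_NORMAL] = 0x20000,  /* e.g. end of RAM */
		};
		/* Node span as the core would read it from memblock. */
		unsigned long node_start_pfn = 0x100, node_end_pfn = 0x20000;
		static const char *const names[MAX_NR_ZONES] = { "DMA", "Normal" };

		unsigned long lower = node_start_pfn;

		for (int zone = 0; zone < MAX_NR_ZONES; zone++) {
			unsigned long start = lower;
			unsigned long end = max_zone_pfn[zone];

			if (end > node_end_pfn)   /* clamp to the node span */
				end = node_end_pfn;
			if (end < start)          /* zone is empty on this node */
				end = start;

			printf("ZONE_%-6s: PFNs [%#lx, %#lx), %lu pages\n",
			       names[zone], start, end, end - start);
			lower = end;
		}
		return 0;
	}

Built as plain user-space C, the sketch prints ZONE_DMA spanning PFNs
[0x100, 0x1000) and ZONE_NORMAL spanning [0x1000, 0x20000). Because a limit
that overlaps or falls below the running lower bound simply clamps to an
empty zone, special cases such as the old "dma_pfn >= high_pfn" branch in the
alpha hunk below are no longer needed.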
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  arch/alpha/mm/init.c    | 16 ++++++----------
>  arch/c6x/mm/init.c      |  8 +++-----
>  arch/h8300/mm/init.c    |  6 +++---
>  arch/hexagon/mm/init.c  |  6 +++---
>  arch/m68k/mm/init.c     |  6 +++---
>  arch/m68k/mm/mcfmmu.c   |  9 +++------
>  arch/nds32/mm/init.c    | 11 ++++-------
>  arch/nios2/mm/init.c    |  8 +++-----
>  arch/openrisc/mm/init.c |  9 +++------
>  arch/um/kernel/mem.c    | 12 ++++--------
>  include/linux/mm.h      |  2 +-
>  mm/page_alloc.c         |  5 ++---
>  12 files changed, 38 insertions(+), 60 deletions(-)
> 
> diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
> index 12e218d3792a..667cd21393b5 100644
> --- a/arch/alpha/mm/init.c
> +++ b/arch/alpha/mm/init.c
> @@ -243,21 +243,17 @@ callback_init(void * kernel_end)
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES] = {0, };
> -	unsigned long dma_pfn, high_pfn;
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
> +	unsigned long dma_pfn;
>  
>  	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
> -	high_pfn = max_pfn = max_low_pfn;
> +	max_pfn = max_low_pfn;
>  
> -	if (dma_pfn >= high_pfn)
> -		zones_size[ZONE_DMA] = high_pfn;
> -	else {
> -		zones_size[ZONE_DMA] = dma_pfn;
> -		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
> -	}
> +	max_zone_pfn[ZONE_DMA] = dma_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_pfn;
>  
>  	/* Initialize mem_map[].  */
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  	/* Initialize the kernel's ZERO_PGE. */
>  	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
> diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
> index 9b374393a8f4..a97e51a3e26d 100644
> --- a/arch/c6x/mm/init.c
> +++ b/arch/c6x/mm/init.c
> @@ -33,7 +33,7 @@ EXPORT_SYMBOL(empty_zero_page);
>  void __init paging_init(void)
>  {
>  	struct pglist_data *pgdat = NODE_DATA(0);
> -	unsigned long zones_size[MAX_NR_ZONES] = {0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
>  	empty_zero_page      = (unsigned long) memblock_alloc(PAGE_SIZE,
>  							      PAGE_SIZE);
> @@ -49,11 +49,9 @@ void __init paging_init(void)
>  	/*
>  	 * Define zones
>  	 */
> -	zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
> -	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
> -		__pa(PAGE_OFFSET) >> PAGE_SHIFT;
> +	max_zone_pfn[ZONE_NORMAL] = memory_end >> PAGE_SHIFT;
>  
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  void __init mem_init(void)
> diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
> index 1eab16b1a0bc..27a0020e3771 100644
> --- a/arch/h8300/mm/init.c
> +++ b/arch/h8300/mm/init.c
> @@ -83,10 +83,10 @@ void __init paging_init(void)
>  		 start_mem, end_mem);
>  
>  	{
> -		unsigned long zones_size[MAX_NR_ZONES] = {0, };
> +		unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
> -		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
> -		free_area_init(zones_size);
> +		max_zone_pfn[ZONE_NORMAL] = end_mem >> PAGE_SHIFT;
> +		free_area_init(max_zone_pfn);
>  	}
>  }
>  
> diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
> index c961773a6fff..f2e6c868e477 100644
> --- a/arch/hexagon/mm/init.c
> +++ b/arch/hexagon/mm/init.c
> @@ -91,7 +91,7 @@ void sync_icache_dcache(pte_t pte)
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
>  	/*
>  	 *  This is not particularly well documented anywhere, but
> @@ -101,9 +101,9 @@ void __init paging_init(void)
>  	 *  adjust accordingly.
>  	 */
>  
> -	zones_sizes[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  
> -	free_area_init(zones_sizes);  /*  sets up the zonelists and mem_map  */
> +	free_area_init(max_zone_pfn);  /*  sets up the zonelists and mem_map  */
>  
>  	/*
>  	 * Start of high memory area.  Will probably need something more
> diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
> index b88d510d4fe3..6d3147662ff2 100644
> --- a/arch/m68k/mm/init.c
> +++ b/arch/m68k/mm/init.c
> @@ -84,7 +84,7 @@ void __init paging_init(void)
>  	 * page_alloc get different views of the world.
>  	 */
>  	unsigned long end_mem = memory_end & PAGE_MASK;
> -	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
>  
>  	high_memory = (void *) end_mem;
>  
> @@ -98,8 +98,8 @@ void __init paging_init(void)
>  	 */
>  	set_fs (USER_DS);
>  
> -	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
> +	free_area_init(max_zone_pfn);
>  }
>  
>  #endif /* CONFIG_MMU */
> diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
> index 0ea375607767..80064e6d064f 100644
> --- a/arch/m68k/mm/mcfmmu.c
> +++ b/arch/m68k/mm/mcfmmu.c
> @@ -39,7 +39,7 @@ void __init paging_init(void)
>  	pte_t *pg_table;
>  	unsigned long address, size;
>  	unsigned long next_pgtable, bootmem_end;
> -	unsigned long zones_size[MAX_NR_ZONES];
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  	enum zone_type zone;
>  	int i;
>  
> @@ -80,11 +80,8 @@ void __init paging_init(void)
>  	}
>  
>  	current->mm = NULL;
> -
> -	for (zone = 0; zone < MAX_NR_ZONES; zone++)
> -		zones_size[zone] = 0x0;
> -	zones_size[ZONE_DMA] = num_pages;
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
> diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
> index 0be3833f6814..91147cca4b64 100644
> --- a/arch/nds32/mm/init.c
> +++ b/arch/nds32/mm/init.c
> @@ -31,16 +31,13 @@ EXPORT_SYMBOL(empty_zero_page);
>  
>  static void __init zone_sizes_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
> -	/* Clear the zone sizes */
> -	memset(zones_size, 0, sizeof(zones_size));
> -
> -	zones_size[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  #ifdef CONFIG_HIGHMEM
> -	zones_size[ZONE_HIGHMEM] = max_pfn;
> +	max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
>  #endif
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  }
>  
> diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
> index 2c609c2516b2..9afca77d10b1 100644
> --- a/arch/nios2/mm/init.c
> +++ b/arch/nios2/mm/init.c
> @@ -46,17 +46,15 @@ pgd_t *pgd_current;
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> -
> -	memset(zones_size, 0, sizeof(zones_size));
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
>  	pagetable_init();
>  	pgd_current = swapper_pg_dir;
>  
> -	zones_size[ZONE_NORMAL] = max_mapnr;
> +	max_zone_pfn[ZONE_NORMAL] = max_mapnr;
>  
>  	/* pass the memory from the bootmem allocator to the main allocator */
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  	flush_dcache_range((unsigned long)empty_zero_page,
>  			(unsigned long)empty_zero_page + PAGE_SIZE);
> diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
> index 1f87b524db78..f94fe6d3f499 100644
> --- a/arch/openrisc/mm/init.c
> +++ b/arch/openrisc/mm/init.c
> @@ -45,17 +45,14 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
>  
>  static void __init zone_sizes_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> -
> -	/* Clear the zone sizes */
> -	memset(zones_size, 0, sizeof(zones_size));
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
>  	/*
>  	 * We use only ZONE_NORMAL
>  	 */
> -	zones_size[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  extern const char _s_kernel_ro[], _e_kernel_ro[];
> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
> index 30885d0b94ac..401b22f14743 100644
> --- a/arch/um/kernel/mem.c
> +++ b/arch/um/kernel/mem.c
> @@ -158,8 +158,8 @@ static void __init fixaddr_user_init( void)
>  
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES], vaddr;
> -	int i;
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
> +	unsigned long vaddr;
>  
>  	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
>  							       PAGE_SIZE);
> @@ -167,12 +167,8 @@ void __init paging_init(void)
>  		panic("%s: Failed to allocate %lu bytes align=%lx\n",
>  		      __func__, PAGE_SIZE, PAGE_SIZE);
>  
> -	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
> -		zones_size[i] = 0;
> -
> -	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
> -		(uml_physmem >> PAGE_SHIFT);
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
> +	free_area_init(max_zone_pfn);
>  
>  	/*
>  	 * Fixed mappings, only the page table structure has to be
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5903bbbdb336..d9a256a97ac5 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2272,7 +2272,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
>  }
>  
>  extern void __init pagecache_init(void);
> -extern void free_area_init(unsigned long * zones_size);
> +extern void free_area_init(unsigned long * max_zone_pfn);
>  extern void __init free_area_init_node(int nid, unsigned long * zones_size,
>  		unsigned long zone_start_pfn, unsigned long *zholes_size);
>  extern void free_initmem(void);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 4530e9cfd9f7..530701b38bc7 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -7700,11 +7700,10 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
>  	dma_reserve = new_dma_reserve;
>  }
>  
> -void __init free_area_init(unsigned long *zones_size)
> +void __init free_area_init(unsigned long *max_zone_pfn)
>  {
>  	init_unavailable_mem();
> -	free_area_init_node(0, zones_size,
> -			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
> +	free_area_init_nodes(max_zone_pfn);

Reviewed-by: Baoquan He <bhe@redhat.com>

>  }
>  
>  static int page_alloc_cpu_dead(unsigned int cpu)
> -- 
> 2.25.1
> 


Thread overview: 56+ messages
2020-04-12 19:48 [PATCH 00/21] mm: rework free_area_init*() funcitons Mike Rapoport
2020-04-12 19:48 ` [PATCH 01/21] mm: memblock: replace dereferences of memblock_region.nid with API calls Mike Rapoport
2020-04-21  2:06   ` Baoquan He
2020-04-12 19:48 ` [PATCH 02/21] mm: make early_pfn_to_nid() and related defintions close to each other Mike Rapoport
2020-04-21  2:24   ` Baoquan He
2020-04-21  8:49     ` Mike Rapoport
2020-04-21  8:49       ` Mike Rapoport
2020-04-21  9:33       ` Baoquan He
2020-04-21  3:31   ` Baoquan He
2020-04-21  8:39     ` Mike Rapoport
2020-04-21  8:39       ` Mike Rapoport
2020-04-12 19:48 ` [PATCH 03/21] mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option Mike Rapoport
2020-04-21  4:23   ` Baoquan He
2020-04-21  9:09     ` Mike Rapoport
2020-04-21  9:09       ` Mike Rapoport
2020-04-21  9:45       ` Baoquan He
2020-04-12 19:48 ` [PATCH 04/21] mm: free_area_init: use maximal zone PFNs rather than zone sizes Mike Rapoport
2020-04-22 23:41   ` Baoquan He [this message]
2020-06-15  3:53   ` Greg Ungerer
2020-06-15  6:22     ` Mike Rapoport
2020-06-15  6:22       ` Mike Rapoport
2020-06-15  7:17       ` Greg Ungerer
2020-04-12 19:48 ` [PATCH 05/21] mm: use free_area_init() instead of free_area_init_nodes() Mike Rapoport
2020-04-23  0:02   ` Baoquan He
2020-04-12 19:48 ` [PATCH 06/21] alpha: simplify detection of memory zone boundaries Mike Rapoport
2020-04-12 19:48 ` [PATCH 07/21] arm: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 08/21] arm64: simplify detection of memory zone boundaries for UMA configs Mike Rapoport
2020-04-12 19:48 ` [PATCH 09/21] csky: simplify detection of memory zone boundaries Mike Rapoport
2020-04-12 19:48 ` [PATCH 10/21] m68k: mm: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 11/21] parisc: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 12/21] sparc32: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 13/21] unicore32: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 14/21] xtensa: " Mike Rapoport
2020-04-12 19:48 ` [PATCH 15/21] mm: memmap_init: iterate over memblock regions rather that check each PFN Mike Rapoport
2020-04-20 14:26   ` Qian Cai
2020-04-24  7:22   ` David Hildenbrand
2020-04-25 16:49     ` Mike Rapoport
2020-04-25 16:49       ` Mike Rapoport
2020-04-12 19:48 ` [PATCH 16/21] mm: remove early_pfn_in_nid() and CONFIG_NODES_SPAN_OTHER_NODES Mike Rapoport
2020-04-23  1:13   ` Baoquan He
2020-04-23  5:50     ` Mike Rapoport
2020-04-23  5:50       ` Mike Rapoport
2020-05-18 21:38     ` Hoan Tran
2020-04-12 19:48 ` [PATCH 17/21] mm: free_area_init: allow defining max_zone_pfn in descending order Mike Rapoport
2020-04-23  2:53   ` Baoquan He
2020-04-23  2:57     ` Baoquan He
2020-04-23  5:55       ` Mike Rapoport
2020-04-23  5:55         ` Mike Rapoport
2020-04-24  0:33         ` Baoquan He
2020-04-12 19:48 ` [PATCH 18/21] mm: rename free_area_init_node() to free_area_init_memoryless_node() Mike Rapoport
2020-04-23  3:14   ` Baoquan He
2020-04-23  6:18     ` Mike Rapoport
2020-04-23  6:18       ` Mike Rapoport
2020-04-12 19:48 ` [PATCH 19/21] mm: clean up free_area_init_node() and its helpers Mike Rapoport
2020-04-12 19:48 ` [PATCH 20/21] mm: simplify find_min_pfn_with_active_regions() Mike Rapoport
2020-04-12 19:48 ` [PATCH 21/21] docs/vm: update memory-models documentation Mike Rapoport
