From: Michal Hocko <mhocko@suse.com>
To: Laurent Dufour <ldufour@linux.ibm.com>
Cc: akpm@linux-foundation.org, David Hildenbrand <david@redhat.com>,
	Oscar Salvador <osalvador@suse.de>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	linux-mm@kvack.org, "Rafael J . Wysocki" <rafael@kernel.org>,
	nathanl@linux.ibm.com, cheloha@linux.ibm.com,
	Tony Luck <tony.luck@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>,
	linux-ia64@vger.kernel.org, linux-kernel@vger.kernel.org,
	stable@vger.kernel.org
Subject: Re: [PATCH v3 1/3] mm: replace memmap_context by meminit_context
Date: Tue, 15 Sep 2020 15:35:11 +0200	[thread overview]
Message-ID: <20200915133511.GA3736@dhcp22.suse.cz> (raw)
In-Reply-To: <20200915132624.9723-1-ldufour@linux.ibm.com>

On Tue 15-09-20 15:26:24, Laurent Dufour wrote:
> The memmap_context enum is used to detect whether a memory operation is due
> to a hot-add operation or is happening at boot time.
> 
> Make it generic to the whole hotplug operation and rename it to meminit_context.
> 
> There is no functional change introduced by this patch.
> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>

Acked-by: Michal Hocko <mhocko@suse.com>
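
Just to make the scope obvious at a glance, the rename boils down to the
sketch below. Only the two enum definitions mirror the patch; the printf
harness is purely illustrative and is of course not part of the kernel code.

	#include <stdio.h>

	/* Before: the name suggested this was only about the memmap. */
	enum memmap_context {
		MEMMAP_EARLY,
		MEMMAP_HOTPLUG,
	};

	/* After: same two values, named after the memory init context. */
	enum meminit_context {
		MEMINIT_EARLY,
		MEMINIT_HOTPLUG,
	};

	int main(void)
	{
		/* Enumerator values are unchanged, so this is a pure rename. */
		printf("early:   %d -> %d\n", MEMMAP_EARLY, MEMINIT_EARLY);
		printf("hotplug: %d -> %d\n", MEMMAP_HOTPLUG, MEMINIT_HOTPLUG);
		return 0;
	}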

> ---
>  arch/ia64/mm/init.c    |  6 +++---
>  include/linux/mm.h     |  2 +-
>  include/linux/mmzone.h | 11 ++++++++---
>  mm/memory_hotplug.c    |  2 +-
>  mm/page_alloc.c        | 10 +++++-----
>  5 files changed, 18 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index 0b3fb4c7af29..8e7b8c6c576e 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
>  	if (map_start < map_end)
>  		memmap_init_zone((unsigned long)(map_end - map_start),
>  				 args->nid, args->zone, page_to_pfn(map_start),
> -				 MEMMAP_EARLY, NULL);
> +				 MEMINIT_EARLY, NULL);
>  	return 0;
>  }
>  
> @@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
>  	     unsigned long start_pfn)
>  {
>  	if (!vmem_map) {
> -		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
> -				NULL);
> +		memmap_init_zone(size, nid, zone, start_pfn,
> +				 MEMINIT_EARLY, NULL);
>  	} else {
>  		struct page *start;
>  		struct memmap_init_callback_data args;
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 1983e08f5906..e942f91ed155 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2409,7 +2409,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
>  
>  extern void set_dma_reserve(unsigned long new_dma_reserve);
>  extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
> -		enum memmap_context, struct vmem_altmap *);
> +		enum meminit_context, struct vmem_altmap *);
>  extern void setup_per_zone_wmarks(void);
>  extern int __meminit init_per_zone_wmark_min(void);
>  extern void mem_init(void);
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 8379432f4f2f..0f7a4ff4b059 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -824,10 +824,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
>  		unsigned int alloc_flags);
>  bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
>  		unsigned long mark, int highest_zoneidx);
> -enum memmap_context {
> -	MEMMAP_EARLY,
> -	MEMMAP_HOTPLUG,
> +/*
> + * Memory initialization context, use to differentiate memory added by
> + * the platform statically or via memory hotplug interface.
> + */
> +enum meminit_context {
> +	MEMINIT_EARLY,
> +	MEMINIT_HOTPLUG,
>  };
> +
>  extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
>  				     unsigned long size);
>  
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index e9d5ab5d3ca0..fc25886ad719 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -729,7 +729,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
>  	 * are reserved so nobody should be touching them so we should be safe
>  	 */
>  	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
> -			MEMMAP_HOTPLUG, altmap);
> +			 MEMINIT_HOTPLUG, altmap);
>  
>  	set_zone_contiguous(zone);
>  }
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index fab5e97dc9ca..5661fa164f13 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5975,7 +5975,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
>   * done. Non-atomic initialization, single-pass.
>   */
>  void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
> -		unsigned long start_pfn, enum memmap_context context,
> +		unsigned long start_pfn, enum meminit_context context,
>  		struct vmem_altmap *altmap)
>  {
>  	unsigned long pfn, end_pfn = start_pfn + size;
> @@ -6007,7 +6007,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
>  		 * There can be holes in boot-time mem_map[]s handed to this
>  		 * function.  They do not exist on hotplugged memory.
>  		 */
> -		if (context == MEMMAP_EARLY) {
> +		if (context == MEMINIT_EARLY) {
>  			if (overlap_memmap_init(zone, &pfn))
>  				continue;
>  			if (defer_init(nid, pfn, end_pfn))
> @@ -6016,7 +6016,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
>  
>  		page = pfn_to_page(pfn);
>  		__init_single_page(page, pfn, zone, nid);
> -		if (context == MEMMAP_HOTPLUG)
> +		if (context == MEMINIT_HOTPLUG)
>  			__SetPageReserved(page);
>  
>  		/*
> @@ -6099,7 +6099,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
>  		 * check here not to call set_pageblock_migratetype() against
>  		 * pfn out of zone.
>  		 *
> -		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
> +		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
>  		 * because this is done early in section_activate()
>  		 */
>  		if (!(pfn & (pageblock_nr_pages - 1))) {
> @@ -6137,7 +6137,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
>  		if (end_pfn > start_pfn) {
>  			size = end_pfn - start_pfn;
>  			memmap_init_zone(size, nid, zone, start_pfn,
> -					 MEMMAP_EARLY, NULL);
> +					 MEMINIT_EARLY, NULL);
>  		}
>  	}
>  }
> -- 
> 2.28.0
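
FWIW, for readers coming to this from the archive: the behavioural
difference between the two contexts in memmap_init_zone() boils down to
the simplified, standalone model below. The helpers here are stand-ins
that drop the zone/nid arguments of the real overlap_memmap_init() and
defer_init() and always return false, so treat this as a sketch of the
control flow, not as the kernel code itself.

	/*
	 * Simplified, standalone model of the dispatch in memmap_init_zone().
	 * The helpers are illustrative stubs, not the kernel implementations.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum meminit_context {
		MEMINIT_EARLY,		/* memory described by the platform at boot */
		MEMINIT_HOTPLUG,	/* memory added later via hotplug */
	};

	static bool overlap_memmap_init(unsigned long *pfn)
	{
		(void)pfn;		/* pretend no pfn overlaps a memblock reservation */
		return false;
	}

	static bool defer_init(unsigned long pfn, unsigned long end_pfn)
	{
		(void)pfn;
		(void)end_pfn;		/* pretend deferred struct page init is off */
		return false;
	}

	static void init_range(unsigned long start_pfn, unsigned long nr_pages,
			       enum meminit_context context)
	{
		unsigned long pfn, end_pfn = start_pfn + nr_pages;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			if (context == MEMINIT_EARLY) {
				/* Boot-time mem_map[] may have holes or be deferred. */
				if (overlap_memmap_init(&pfn))
					continue;
				if (defer_init(pfn, end_pfn))
					break;
			}
			/* Hotplugged pages additionally stay reserved until onlined. */
			printf("init pfn %lu%s\n", pfn,
			       context == MEMINIT_HOTPLUG ? " (reserved)" : "");
		}
	}

	int main(void)
	{
		init_range(0, 3, MEMINIT_EARLY);
		init_range(1024, 3, MEMINIT_HOTPLUG);
		return 0;
	}

The only point being illustrated is that MEMINIT_EARLY is the path that
has to cope with holes and deferred struct page init, while
MEMINIT_HOTPLUG simply leaves the pages reserved until they are onlined.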

-- 
Michal Hocko
SUSE Labs

Thread overview:
2020-09-15  9:41 [PATCH v3 0/3] mm: fix memory to node bad links in sysfs Laurent Dufour
2020-09-15  9:41 ` [PATCH v3 1/3] mm: replace memmap_context by memplug_context Laurent Dufour
2020-09-15 10:17   ` David Hildenbrand
2020-09-15 10:28   ` Oscar Salvador
2020-09-15 12:15   ` Michal Hocko
2020-09-15 13:26     ` [PATCH v3 1/3] mm: replace memmap_context by meminit_context Laurent Dufour
2020-09-15 13:35       ` Michal Hocko [this message]
2020-09-16  6:33       ` Greg Kroah-Hartman
2020-09-16  7:29         ` Laurent Dufour
2020-09-16  7:40           ` Greg Kroah-Hartman
2020-09-16  7:47             ` Laurent Dufour
2020-09-16  7:52               ` David Hildenbrand
2020-09-16 16:09                 ` Laurent Dufour
2020-09-16 23:37                   ` Andrew Morton
2020-09-17  8:00                     ` Laurent Dufour
2020-09-15  9:41 ` [PATCH v3 2/3] mm: don't rely on system state to detect hot-plug operations Laurent Dufour
2020-09-15 10:31   ` Oscar Salvador
2020-09-15 12:19   ` Michal Hocko
2020-09-15  9:41 ` [PATCH v3 3/3] mm: don't panic when links can't be created in sysfs Laurent Dufour
2020-09-15 10:33   ` Oscar Salvador
