linux-kernel.vger.kernel.org archive mirror
From: Michal Hocko <mhocko@kernel.org>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Mel Gorman <mgorman@suse.de>, Vlastimil Babka <vbabka@suse.cz>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Reza Arbab <arbab@linux.vnet.ibm.com>,
	Yasuaki Ishimatsu <yasu.isimatu@gmail.com>,
	qiuxishi@huawei.com, Kani Toshimitsu <toshi.kani@hpe.com>,
	slaoub@gmail.com, Joonsoo Kim <js1304@gmail.com>,
	Andi Kleen <ak@linux.intel.com>,
	David Rientjes <rientjes@google.com>,
	Daniel Kiper <daniel.kiper@oracle.com>,
	Igor Mammedov <imammedo@redhat.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	LKML <linux-kernel@vger.kernel.org>,
	Michal Hocko <mhocko@suse.com>
Subject: [PATCH 9/9] mm, memory_hotplug: remove unused cruft after memory hotplug rework
Date: Mon, 10 Apr 2017 13:03:51 +0200	[thread overview]
Message-ID: <20170410110351.12215-10-mhocko@kernel.org> (raw)
In-Reply-To: <20170410110351.12215-1-mhocko@kernel.org>

From: Michal Hocko <mhocko@suse.com>

zone_for_memory() no longer has any users, and neither does the whole
zone shifting infrastructure, so drop them all.

This shouldn't introduce any functional changes.

Signed-off-by: Michal Hocko <mhocko@suse.com>
---
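For reference, a rough before/after sketch of the flow this series
converges on, showing why zone_for_memory() has no callers left. Only
arch_add_memory(), zone_for_memory() and move_pfn_range_to_zone() below
appear in this patch; the surrounding call sites are simplified
assumptions, not code lifted from the tree:

	/* before the rework: a zone was picked up front at hot-add time */
	zone_id = zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	/* ... and pages were added straight into that zone ... */

	/* after the rework (patch 6/9): hot-add only creates the sections */
	arch_add_memory(nid, start, size, true /* want_memblock */);
	/* ... the range is attached to a zone only when it gets onlined */
	move_pfn_range_to_zone(zone, start_pfn, nr_pages);

With nobody picking a zone up front anymore, zone_for_memory(),
should_add_memory_movable() and the zone shifting helpers
(move_pfn_range_left/right, __add_zone & co.) can all go.
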
 include/linux/memory_hotplug.h |   2 -
 mm/memory_hotplug.c            | 207 -----------------------------------------
 2 files changed, 209 deletions(-)

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c28d0aba7525..a9985f6c460a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -274,8 +274,6 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
 extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
-		bool for_device);
 extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index be8be844d340..94e96ca790f6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -299,180 +299,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
-static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
-				     unsigned long end_pfn)
-{
-	unsigned long old_zone_end_pfn;
-
-	zone_span_writelock(zone);
-
-	old_zone_end_pfn = zone_end_pfn(zone);
-	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
-		zone->zone_start_pfn = start_pfn;
-
-	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-				zone->zone_start_pfn;
-
-	zone_span_writeunlock(zone);
-}
-
-static void resize_zone(struct zone *zone, unsigned long start_pfn,
-		unsigned long end_pfn)
-{
-	zone_span_writelock(zone);
-
-	if (end_pfn - start_pfn) {
-		zone->zone_start_pfn = start_pfn;
-		zone->spanned_pages = end_pfn - start_pfn;
-	} else {
-		/*
-		 * make it consist as free_area_init_core(),
-		 * if spanned_pages = 0, then keep start_pfn = 0
-		 */
-		zone->zone_start_pfn = 0;
-		zone->spanned_pages = 0;
-	}
-
-	zone_span_writeunlock(zone);
-}
-
-static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
-		unsigned long end_pfn)
-{
-	enum zone_type zid = zone_idx(zone);
-	int nid = zone->zone_pgdat->node_id;
-	unsigned long pfn;
-
-	for (pfn = start_pfn; pfn < end_pfn; pfn++)
-		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
-}
-
-static void __ref ensure_zone_is_initialized(struct zone *zone,
-			unsigned long start_pfn, unsigned long num_pages)
-{
-	if (!zone_is_initialized(zone))
-		init_currently_empty_zone(zone, start_pfn, num_pages);
-}
-
-static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long flags;
-	unsigned long z1_start_pfn;
-
-	ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
-
-	pgdat_resize_lock(z1->zone_pgdat, &flags);
-
-	/* can't move pfns which are higher than @z2 */
-	if (end_pfn > zone_end_pfn(z2))
-		goto out_fail;
-	/* the move out part must be at the left most of @z2 */
-	if (start_pfn > z2->zone_start_pfn)
-		goto out_fail;
-	/* must included/overlap */
-	if (end_pfn <= z2->zone_start_pfn)
-		goto out_fail;
-
-	/* use start_pfn for z1's start_pfn if z1 is empty */
-	if (!zone_is_empty(z1))
-		z1_start_pfn = z1->zone_start_pfn;
-	else
-		z1_start_pfn = start_pfn;
-
-	resize_zone(z1, z1_start_pfn, end_pfn);
-	resize_zone(z2, end_pfn, zone_end_pfn(z2));
-
-	pgdat_resize_unlock(z1->zone_pgdat, &flags);
-
-	fix_zone_id(z1, start_pfn, end_pfn);
-
-	return 0;
-out_fail:
-	pgdat_resize_unlock(z1->zone_pgdat, &flags);
-	return -1;
-}
-
-static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long flags;
-	unsigned long z2_end_pfn;
-
-	ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
-
-	pgdat_resize_lock(z1->zone_pgdat, &flags);
-
-	/* can't move pfns which are lower than @z1 */
-	if (z1->zone_start_pfn > start_pfn)
-		goto out_fail;
-	/* the move out part mast at the right most of @z1 */
-	if (zone_end_pfn(z1) >  end_pfn)
-		goto out_fail;
-	/* must included/overlap */
-	if (start_pfn >= zone_end_pfn(z1))
-		goto out_fail;
-
-	/* use end_pfn for z2's end_pfn if z2 is empty */
-	if (!zone_is_empty(z2))
-		z2_end_pfn = zone_end_pfn(z2);
-	else
-		z2_end_pfn = end_pfn;
-
-	resize_zone(z1, z1->zone_start_pfn, start_pfn);
-	resize_zone(z2, start_pfn, z2_end_pfn);
-
-	pgdat_resize_unlock(z1->zone_pgdat, &flags);
-
-	fix_zone_id(z2, start_pfn, end_pfn);
-
-	return 0;
-out_fail:
-	pgdat_resize_unlock(z1->zone_pgdat, &flags);
-	return -1;
-}
-
-static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
-				      unsigned long end_pfn)
-{
-	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
-
-	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
-		pgdat->node_start_pfn = start_pfn;
-
-	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-					pgdat->node_start_pfn;
-}
-
-static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
-{
-	struct pglist_data *pgdat = zone->zone_pgdat;
-	int nr_pages = PAGES_PER_SECTION;
-	int nid = pgdat->node_id;
-	int zone_type;
-	unsigned long flags, pfn;
-
-	zone_type = zone - pgdat->node_zones;
-	ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
-
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
-	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
-			phys_start_pfn + nr_pages);
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
-	memmap_init_zone(nr_pages, nid, zone_type,
-			 phys_start_pfn, MEMMAP_HOTPLUG);
-
-	/* online_page_range is called later and expects pages reserved */
-	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
-		if (!pfn_valid(pfn))
-			continue;
-
-		SetPageReserved(pfn_to_page(pfn));
-	}
-	return 0;
-}
-
 static int __meminit __add_section(int nid, unsigned long phys_start_pfn, bool want_memblock)
 {
 	int ret;
@@ -1349,39 +1175,6 @@ static int check_hotplug_memory_range(u64 start, u64 size)
 	return 0;
 }
 
-/*
- * If movable zone has already been setup, newly added memory should be check.
- * If its address is higher than movable zone, it should be added as movable.
- * Without this check, movable zone may overlap with other zone.
- */
-static int should_add_memory_movable(int nid, u64 start, u64 size)
-{
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	pg_data_t *pgdat = NODE_DATA(nid);
-	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
-
-	if (zone_is_empty(movable_zone))
-		return 0;
-
-	if (movable_zone->zone_start_pfn <= start_pfn)
-		return 1;
-
-	return 0;
-}
-
-int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
-		bool for_device)
-{
-#ifdef CONFIG_ZONE_DEVICE
-	if (for_device)
-		return ZONE_DEVICE;
-#endif
-	if (should_add_memory_movable(nid, start, size))
-		return ZONE_MOVABLE;
-
-	return zone_default;
-}
-
 static int online_memory_block(struct memory_block *mem, void *arg)
 {
 	return device_online(&mem->dev);
-- 
2.11.0

Thread overview: 84+ messages
2017-04-10 11:03 [PATCH -v2 0/9] mm: make movable onlining suck less Michal Hocko
2017-04-10 11:03 ` [PATCH 1/9] mm: remove return value from init_currently_empty_zone Michal Hocko
2017-04-11  8:10   ` Balbir Singh
2017-04-13 12:03   ` Vlastimil Babka
2017-04-13 19:43   ` YASUAKI ISHIMATSU
2017-04-10 11:03 ` [PATCH 2/9] mm, memory_hotplug: use node instead of zone in can_online_high_movable Michal Hocko
2017-04-13 12:46   ` Vlastimil Babka
2017-04-13 19:45   ` YASUAKI ISHIMATSU
2017-04-10 11:03 ` [PATCH 3/9] mm: drop page_initialized check from get_nid_for_pfn Michal Hocko
2017-04-13 12:59   ` Vlastimil Babka
2017-04-10 11:03 ` [PATCH 4/9] mm, memory_hotplug: get rid of is_zone_device_section Michal Hocko
2017-04-10 16:20   ` Jerome Glisse
2017-04-10 16:31     ` Michal Hocko
2017-04-13 13:05   ` Vlastimil Babka
2017-04-17 20:12   ` Jerome Glisse
2017-04-18  7:19     ` Michal Hocko
2017-04-10 11:03 ` [PATCH 5/9] mm, memory_hotplug: split up register_one_node Michal Hocko
2017-04-13 14:05   ` Vlastimil Babka
2017-04-13 14:13     ` Michal Hocko
2017-04-10 11:03 ` [PATCH 6/9] mm, memory_hotplug: do not associate hotadded memory to zones until online Michal Hocko
2017-04-10 16:25   ` [PATCH v3 " Michal Hocko
2017-04-20  8:25     ` Vlastimil Babka
2017-04-20  9:06       ` Michal Hocko
2017-04-20 10:51         ` Vlastimil Babka
2017-04-10 11:03 ` [PATCH 7/9] mm, memory_hotplug: replace for_device by want_memblock in arch_add_memory Michal Hocko
2017-04-20  8:29   ` Vlastimil Babka
2017-04-10 11:03 ` [PATCH 8/9] mm, memory_hotplug: fix the section mismatch warning Michal Hocko
2017-04-10 11:03 ` Michal Hocko [this message]
2017-04-20  8:38   ` [PATCH 9/9] mm, memory_hotplug: remove unused cruft after memory hotplug rework Vlastimil Babka
2017-04-10 14:27 ` [PATCH -v2 0/9] mm: make movable onlining suck less Igor Mammedov
2017-04-10 14:56   ` Michal Hocko
2017-04-10 15:22     ` Michal Hocko
2017-04-10 15:31       ` Michal Hocko
2017-04-11  8:01     ` Igor Mammedov
2017-04-11  8:41       ` Michal Hocko
2017-04-11  9:53         ` Igor Mammedov
2017-04-11 10:47           ` Michal Hocko
2017-04-10 16:02   ` Michal Hocko
2017-04-18  8:23     ` Vlastimil Babka
2017-04-10 16:09   ` Michal Hocko
2017-04-11  6:38     ` Igor Mammedov
2017-04-11  9:23       ` Michal Hocko
2017-04-11  9:59         ` Igor Mammedov
2017-04-11 11:01           ` Michal Hocko
2017-04-11 11:38             ` Michal Hocko
2017-04-11 12:38               ` Michal Hocko
2017-04-10 15:43 ` Reza Arbab
2017-04-11  8:59   ` Michal Hocko
2017-04-10 16:35 ` Jerome Glisse
2017-04-10 17:53   ` Michal Hocko
2017-04-11  2:51   ` Balbir Singh
2017-04-11 17:03 ` Michal Hocko
2017-04-17 21:51   ` Dan Williams
2017-04-18  7:14     ` Michal Hocko
2017-04-18 16:42       ` Dan Williams
2017-04-18 19:54         ` Michal Hocko
2017-04-20  3:37           ` Dan Williams
2017-04-15 12:17 ` Michal Hocko
2017-04-15 12:17   ` [PATCH 1/3] mm: consider zone which is not fully populated to have holes Michal Hocko
2017-04-18  8:45     ` Vlastimil Babka
2017-04-18  9:27       ` Michal Hocko
2017-04-19 11:59         ` Vlastimil Babka
2017-04-19 12:16           ` Michal Hocko
2017-04-19 12:34             ` Vlastimil Babka
2017-04-19 12:50               ` Michal Hocko
2017-04-15 12:17   ` [PATCH 2/3] mm, compaction: skip over holes in __reset_isolation_suitable Michal Hocko
2017-04-15 12:17   ` [PATCH 3/3] mm: __first_valid_page skip over offline pages Michal Hocko
2017-04-17  5:47   ` your mail Joonsoo Kim
2017-04-17  8:15     ` Michal Hocko
2017-04-20  1:27       ` Joonsoo Kim
2017-04-20  7:28         ` Michal Hocko
2017-04-20  8:49           ` Michal Hocko
2017-04-20 11:56             ` Vlastimil Babka
2017-04-20 12:13               ` Michal Hocko
2017-04-21  2:46             ` [lkp-robot] 73821bb516: WARNING:at_mm/memblock.c:#memblock_virt_alloc_internal kernel test robot
2017-04-21  8:05               ` Michal Hocko
2017-04-21  4:38           ` your mail Joonsoo Kim
2017-04-21  7:16             ` Michal Hocko
2017-04-24  1:44               ` Joonsoo Kim
2017-04-24  7:53                 ` Michal Hocko
2017-04-25  2:50                   ` Joonsoo Kim
2017-04-26  9:19                     ` Michal Hocko
2017-04-27  2:08                       ` Joonsoo Kim
2017-04-27 15:10                         ` Michal Hocko
