Subject: Re: [mm PATCH v3 2/6] mm: Drop meminit_pfn_in_nid as it is redundant
References: <20181015202456.2171.88406.stgit@localhost.localdomain>
 <20181015202703.2171.40829.stgit@localhost.localdomain>
From: Pavel Tatashin
Date: Tue, 16 Oct 2018 16:33:10 -0400
In-Reply-To: <20181015202703.2171.40829.stgit@localhost.localdomain>
To: Alexander Duyck, linux-mm@kvack.org, akpm@linux-foundation.org
Cc: pavel.tatashin@microsoft.com, mhocko@suse.com, dave.jiang@intel.com,
 linux-kernel@vger.kernel.org, willy@infradead.org, davem@davemloft.net,
 yi.z.zhang@linux.intel.com, khalid.aziz@oracle.com, rppt@linux.vnet.ibm.com,
 vbabka@suse.cz, sparclinux@vger.kernel.org, dan.j.williams@intel.com,
 ldufour@linux.vnet.ibm.com, mgorman@techsingularity.net, mingo@kernel.org,
 kirill.shutemov@linux.intel.com

On 10/15/18 4:27 PM, Alexander Duyck wrote:
> As best as I can tell the meminit_pfn_in_nid call is completely redundant.
> The deferred memory initialization is already making use of
> for_each_free_mem_range which in turn will call into __next_mem_range which
> will only return a memory range if it matches the node ID provided assuming
> it is not NUMA_NO_NODE.
>
> I am operating on the assumption that there are no zones or pg_data_t
> structures that have a NUMA node of NUMA_NO_NODE associated with them. If
> that is the case then __next_mem_range will never return a memory range
> that doesn't match the zone's node ID and as such the check is redundant.
>
> So one piece I would like to verify on this is whether this works for ia64.
> Technically it was using a different approach to get the node ID, but it
> seems to have the node ID also encoded into the memblock. So I am
> assuming this is okay, but would like to get confirmation on that.
>
> Signed-off-by: Alexander Duyck

If I am not mistaken, this code is for systems with memory interleaving,
where a pfn inside one node's start..end span may actually belong to a
different node. A quick look shows that x86, powerpc, s390, and sparc set
CONFIG_NODES_SPAN_OTHER_NODES.

I am not sure about other arches, but at least on SPARC there are some
processors with a memory interleaving feature:
http://www.fujitsu.com/global/products/computing/servers/unix/sparc-enterprise/technology/performance/memory.html
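To make the interleaving case concrete, below is a stand-alone sketch
(plain userspace C with a made-up region layout, not kernel code) of how a
pfn that falls inside one node's start..end span can still belong to
another node, which is what the early_pfn_in_nid()/meminit_pfn_in_nid()
check guards against:

/*
 * Stand-alone illustration, not kernel code: two nodes whose pfn spans
 * overlap because their memory blocks are interleaved.  The layout and
 * numbers below are invented for this example.
 */
#include <stdio.h>

struct region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

/* Interleaved blocks: node 0 spans 0x0-0xc0000, node 1 spans 0x40000-0x100000 */
static const struct region regions[] = {
	{ 0x00000, 0x40000, 0 },
	{ 0x40000, 0x80000, 1 },
	{ 0x80000, 0xc0000, 0 },
	{ 0xc0000, 0x100000, 1 },
};

static int pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (pfn >= regions[i].start_pfn && pfn < regions[i].end_pfn)
			return regions[i].nid;
	return -1;
}

int main(void)
{
	/* Inside node 0's span (0x0-0xc0000), but the block belongs to node 1. */
	unsigned long pfn = 0x50000;

	printf("pfn %#lx is inside node 0's span but belongs to node %d\n",
	       pfn, pfn_to_nid(pfn));
	return 0;
}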
Pavel

> ---
>  mm/page_alloc.c |   50 ++++++++++++++------------------------------
>  1 file changed, 14 insertions(+), 36 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 4bd858d1c3ba..a766a15fad81 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1301,36 +1301,22 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
>  #endif
>
>  #ifdef CONFIG_NODES_SPAN_OTHER_NODES
> -static inline bool __meminit __maybe_unused
> -meminit_pfn_in_nid(unsigned long pfn, int node,
> -                   struct mminit_pfnnid_cache *state)
> +/* Only safe to use early in boot when initialisation is single-threaded */
> +static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
>  {
>          int nid;
>
> -        nid = __early_pfn_to_nid(pfn, state);
> +        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
>          if (nid >= 0 && nid != node)
>                  return false;
>          return true;
>  }
>
> -/* Only safe to use early in boot when initialisation is single-threaded */
> -static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
> -{
> -        return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
> -}
> -
>  #else
> -
>  static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
>  {
>          return true;
>  }
> -static inline bool __meminit __maybe_unused
> -meminit_pfn_in_nid(unsigned long pfn, int node,
> -                   struct mminit_pfnnid_cache *state)
> -{
> -        return true;
> -}
>  #endif
>
>
> @@ -1459,21 +1445,13 @@ static inline void __init pgdat_init_report_one_done(void)
>   *
>   * Then, we check if a current large page is valid by only checking the validity
>   * of the head pfn.
> - *
> - * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
> - * within a node: a pfn is between start and end of a node, but does not belong
> - * to this memory node.
>   */
> -static inline bool __init
> -deferred_pfn_valid(int nid, unsigned long pfn,
> -                   struct mminit_pfnnid_cache *nid_init_state)
> +static inline bool __init deferred_pfn_valid(unsigned long pfn)
>  {
>          if (!pfn_valid_within(pfn))
>                  return false;
>          if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
>                  return false;
> -        if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
> -                return false;
>          return true;
>  }
>
> @@ -1481,15 +1459,14 @@ static inline void __init pgdat_init_report_one_done(void)
>   * Free pages to buddy allocator. Try to free aligned pages in
>   * pageblock_nr_pages sizes.
>   */
> -static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
> +static void __init deferred_free_pages(unsigned long pfn,
>                                         unsigned long end_pfn)
>  {
> -        struct mminit_pfnnid_cache nid_init_state = { };
>          unsigned long nr_pgmask = pageblock_nr_pages - 1;
>          unsigned long nr_free = 0;
>
>          for (; pfn < end_pfn; pfn++) {
> -                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
> +                if (!deferred_pfn_valid(pfn)) {
>                          deferred_free_range(pfn - nr_free, nr_free);
>                          nr_free = 0;
>                  } else if (!(pfn & nr_pgmask)) {
> @@ -1509,17 +1486,18 @@ static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
>   * by performing it only once every pageblock_nr_pages.
>   * Return number of pages initialized.
>   */
> -static unsigned long __init deferred_init_pages(int nid, int zid,
> +static unsigned long __init deferred_init_pages(struct zone *zone,
>                                                  unsigned long pfn,
>                                                  unsigned long end_pfn)
>  {
> -        struct mminit_pfnnid_cache nid_init_state = { };
>          unsigned long nr_pgmask = pageblock_nr_pages - 1;
> +        int nid = zone_to_nid(zone);
>          unsigned long nr_pages = 0;
> +        int zid = zone_idx(zone);
>          struct page *page = NULL;
>
>          for (; pfn < end_pfn; pfn++) {
> -                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
> +                if (!deferred_pfn_valid(pfn)) {
>                          page = NULL;
>                          continue;
>                  } else if (!page || !(pfn & nr_pgmask)) {
> @@ -1582,12 +1560,12 @@ static int __init deferred_init_memmap(void *data)
>          for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
>                  spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
>                  epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
> -                nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
> +                nr_pages += deferred_init_pages(zone, spfn, epfn);
>          }
>          for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
>                  spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
>                  epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
> -                deferred_free_pages(nid, zid, spfn, epfn);
> +                deferred_free_pages(spfn, epfn);
>          }
>          pgdat_resize_unlock(pgdat, &flags);
>
> @@ -1676,7 +1654,7 @@ static int __init deferred_init_memmap(void *data)
>          while (spfn < epfn && nr_pages < nr_pages_needed) {
>                  t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
>                  first_deferred_pfn = min(t, epfn);
> -                nr_pages += deferred_init_pages(nid, zid, spfn,
> +                nr_pages += deferred_init_pages(zone, spfn,
>                                                  first_deferred_pfn);
>                  spfn = first_deferred_pfn;
>          }
> @@ -1688,7 +1666,7 @@ static int __init deferred_init_memmap(void *data)
>          for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
>                  spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
>                  epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
> -                deferred_free_pages(nid, zid, spfn, epfn);
> +                deferred_free_pages(spfn, epfn);
>
>                  if (first_deferred_pfn == epfn)
>                          break;
>
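For completeness, here is how I read the argument in the commit message,
as a stand-alone model (simplified userspace C, not the actual memblock
implementation; the region layout and numbers are invented): if the range
walk itself skips every region whose node id differs from the nid passed
in, then each range handed to deferred_init_pages()/deferred_free_pages()
already belongs to the zone's node, which is why the per-pfn node check is
claimed to be redundant.

/*
 * Stand-alone model, not the memblock code: walk a list of free memory
 * regions and skip any region whose node id does not match the requested
 * one (unless NUMA_NO_NODE is passed).  This mimics the behaviour the
 * commit message attributes to for_each_free_mem_range()/__next_mem_range().
 * Region layout and numbers are invented for this example.
 */
#include <stdio.h>

#define NUMA_NO_NODE	(-1)

struct region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static const struct region free_regions[] = {
	{ 0x00000, 0x40000, 0 },
	{ 0x40000, 0x80000, 1 },
	{ 0x80000, 0xc0000, 0 },
	{ 0xc0000, 0x100000, 1 },
};

int main(void)
{
	int nid = 0;	/* the zone's node, as deferred_init_memmap() would pass it */
	unsigned int i;

	for (i = 0; i < sizeof(free_regions) / sizeof(free_regions[0]); i++) {
		if (nid != NUMA_NO_NODE && nid != free_regions[i].nid)
			continue;	/* another node's memory is never returned */
		printf("range %#lx-%#lx handed to node %d init\n",
		       free_regions[i].start_pfn, free_regions[i].end_pfn, nid);
	}
	return 0;
}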