Date: Wed, 28 Mar 2018 17:26:20 +0800
From: Wei Yang <richard.weiyang@gmail.com>
To: Jia He <hejianet@gmail.com>
Cc: Andrew Morton, Michal Hocko, Catalin Marinas, Mel Gorman,
	Will Deacon, Mark Rutland, Ard Biesheuvel, Thomas Gleixner,
	Ingo Molnar, "H. Peter Anvin", Pavel Tatashin, Daniel Jordan,
	AKASHI Takahiro, Gioh Kim, Steven Sistare, Daniel Vacek,
	Eugeniu Rosca, Vlastimil Babka, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, James Morse, Steve Capper, x86@kernel.org,
	Greg Kroah-Hartman, Kate Stewart, Philippe Ombredanne,
	Johannes Weiner, Kemi Wang, Petr Tesarik, YASUAKI ISHIMATSU,
	Andrey Ryabinin, Nikolay Borisov, Jia He
Subject: Re: [PATCH v3 2/5] mm: page_alloc: reduce unnecessary binary search in memblock_next_valid_pfn()
Message-ID: <20180328092620.GA98648@WeideMacBook-Pro.local>
References: <1522033340-6575-1-git-send-email-hejianet@gmail.com>
 <1522033340-6575-3-git-send-email-hejianet@gmail.com>
In-Reply-To: <1522033340-6575-3-git-send-email-hejianet@gmail.com>

On Sun, Mar 25, 2018 at 08:02:16PM -0700, Jia He wrote:
>Commit b92df1de5d28 ("mm: page_alloc: skip over regions of invalid pfns
>where possible") optimized the loop in memmap_init_zone(). But there is
>still some room for improvement. E.g.
>if pfn and pfn+1 are in the same
>memblock region, we can simply pfn++ instead of doing the binary search
>in memblock_next_valid_pfn. This patch only works when
>CONFIG_HAVE_ARCH_PFN_VALID is enabled.
>
>Signed-off-by: Jia He <hejianet@gmail.com>
>---
> include/linux/memblock.h |  2 +-
> mm/memblock.c            | 73 +++++++++++++++++++++++++++++-------------------
> mm/page_alloc.c          |  3 +-
> 3 files changed, 47 insertions(+), 31 deletions(-)
>
>diff --git a/include/linux/memblock.h b/include/linux/memblock.h
>index efbbe4b..a8fb2ab 100644
>--- a/include/linux/memblock.h
>+++ b/include/linux/memblock.h
>@@ -204,7 +204,7 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
> #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
>
> #ifdef CONFIG_HAVE_ARCH_PFN_VALID
>-unsigned long memblock_next_valid_pfn(unsigned long pfn);
>+unsigned long memblock_next_valid_pfn(unsigned long pfn, int *idx);
> #endif
>
> /**
>diff --git a/mm/memblock.c b/mm/memblock.c
>index bea5a9c..06c1a08 100644
>--- a/mm/memblock.c
>+++ b/mm/memblock.c
>@@ -1102,35 +1102,6 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
> 	*out_nid = r->nid;
> }
>
>-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
>-unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
>-{
>-	struct memblock_type *type = &memblock.memory;
>-	unsigned int right = type->cnt;
>-	unsigned int mid, left = 0;
>-	phys_addr_t addr = PFN_PHYS(++pfn);
>-
>-	do {
>-		mid = (right + left) / 2;
>-
>-		if (addr < type->regions[mid].base)
>-			right = mid;
>-		else if (addr >= (type->regions[mid].base +
>-				  type->regions[mid].size))
>-			left = mid + 1;
>-		else {
>-			/* addr is within the region, so pfn is valid */
>-			return pfn;
>-		}
>-	} while (left < right);
>-
>-	if (right == type->cnt)
>-		return -1UL;
>-	else
>-		return PHYS_PFN(type->regions[right].base);
>-}
>-#endif /*CONFIG_HAVE_ARCH_PFN_VALID*/
>-
> /**
>  * memblock_set_node - set node ID on memblock regions
>  * @base: base of area to set node ID for
>@@ -1162,6 +1133,50 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
> }
> #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
>
>+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
>+unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
>+					int *last_idx)
>+{
>+	struct memblock_type *type = &memblock.memory;
>+	unsigned int right = type->cnt;
>+	unsigned int mid, left = 0;
>+	unsigned long start_pfn, end_pfn;
>+	phys_addr_t addr = PFN_PHYS(++pfn);
>+
>+	/* fast path, return pfh+1 if next pfn is in the same region */
                             ^^^ pfn

>+	if (*last_idx != -1) {
>+		start_pfn = PFN_DOWN(type->regions[*last_idx].base);

To me, it should be PFN_UP().

>+		end_pfn = PFN_DOWN(type->regions[*last_idx].base +
>+				type->regions[*last_idx].size);
>+
>+		if (pfn < end_pfn && pfn > start_pfn)

Could be (pfn < end_pfn && pfn >= start_pfn)?

pfn == start_pfn is also a valid pfn.

>+			return pfn;
>+	}
>+
>+	/* slow path, do the binary searching */
>+	do {
>+		mid = (right + left) / 2;
>+
>+		if (addr < type->regions[mid].base)
>+			right = mid;
>+		else if (addr >= (type->regions[mid].base +
>+				type->regions[mid].size))
>+			left = mid + 1;
>+		else {
>+			*last_idx = mid;
>+			return pfn;
>+		}
>+	} while (left < right);
>+
>+	if (right == type->cnt)
>+		return -1UL;
>+
>+	*last_idx = right;
>+
>+	return PHYS_PFN(type->regions[*last_idx].base);
>+}
>+#endif /*CONFIG_HAVE_ARCH_PFN_VALID*/

The same comment as Daniel's: you are moving the function out of the
CONFIG_HAVE_MEMBLOCK_NODE_MAP block.
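BTW, putting my comments on the fast path together, what I would expect
is something like the below (untested, just to illustrate the boundary
handling):

	/* fast path, return pfn+1 if next pfn is in the same region */
	if (*last_idx != -1) {
		/* the first valid pfn must be rounded up from base */
		start_pfn = PFN_UP(type->regions[*last_idx].base);
		end_pfn = PFN_DOWN(type->regions[*last_idx].base +
				type->regions[*last_idx].size);

		/* start_pfn itself is valid, so pfn >= start_pfn */
		if (pfn < end_pfn && pfn >= start_pfn)
			return pfn;
	}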
>+ > static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, > phys_addr_t align, phys_addr_t start, > phys_addr_t end, int nid, ulong flags) >diff --git a/mm/page_alloc.c b/mm/page_alloc.c >index 2a967f7..0bb0274 100644 >--- a/mm/page_alloc.c >+++ b/mm/page_alloc.c >@@ -5459,6 +5459,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, > unsigned long end_pfn = start_pfn + size; > pg_data_t *pgdat = NODE_DATA(nid); > unsigned long pfn; >+ int idx = -1; > unsigned long nr_initialised = 0; > struct page *page; > #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP >@@ -5490,7 +5491,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, > * end_pfn), such that we hit a valid pfn (or end_pfn) > * on our next iteration of the loop. > */ >- pfn = memblock_next_valid_pfn(pfn) - 1; >+ pfn = memblock_next_valid_pfn(pfn, &idx) - 1; > #endif > continue; > } >-- >2.7.4 -- Wei Yang Help you, Help me