From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753586AbcDOJAr (ORCPT); Fri, 15 Apr 2016 05:00:47 -0400
Received: from outbound-smtp04.blacknight.com ([81.17.249.35]:47925
	"EHLO outbound-smtp04.blacknight.com" rhost-flags-OK-OK-OK-OK)
	by vger.kernel.org with ESMTP id S1751916AbcDOJAo (ORCPT);
	Fri, 15 Apr 2016 05:00:44 -0400
From: Mel Gorman
To: Andrew Morton
Cc: Vlastimil Babka, Jesper Dangaard Brouer, Linux-MM, LKML, Mel Gorman
Subject: [PATCH 07/28] mm, page_alloc: Avoid unnecessary zone lookups during pageblock operations
Date: Fri, 15 Apr 2016 09:58:59 +0100
Message-Id: <1460710760-32601-8-git-send-email-mgorman@techsingularity.net>
X-Mailer: git-send-email 2.6.4
In-Reply-To: <1460710760-32601-1-git-send-email-mgorman@techsingularity.net>
References: <1460710760-32601-1-git-send-email-mgorman@techsingularity.net>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

Pageblocks have an associated bitmap storing their migrate types and
whether each pageblock should be skipped during compaction. The bitmap
may be associated with either a memory section or a zone, but the zone
is currently looked up unconditionally, even in the SPARSEMEM case where
only the section is needed. The compiler should optimise the redundant
lookup away automatically, so in many cases this is a cosmetic patch
only.
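For reference, the bit-index arithmetic these helpers perform can be
checked with a standalone userspace sketch. This is an illustration
only, not kernel code: the constants below are typical x86_64 values
(pageblock_order of 9, four flag bits per pageblock) assumed for the
example, not anything this patch defines:

  #include <stdio.h>

  /* Illustrative values; the kernel derives these from its config. */
  #define PAGEBLOCK_ORDER    9              /* 512 pages per pageblock */
  #define NR_PB_BITS         4              /* 3 migratetype bits + skip bit */
  #define BITS_PER_LONG      64
  #define PAGES_PER_SECTION  (1UL << 15)    /* SPARSEMEM section, x86_64 */

  int main(void)
  {
          unsigned long pfn = 0x12345;

          /* SPARSEMEM case: index relative to the owning section's
           * bitmap, so no zone lookup is needed at all. */
          unsigned long bitidx = ((pfn & (PAGES_PER_SECTION - 1))
                                  >> PAGEBLOCK_ORDER) * NR_PB_BITS;
          unsigned long word_bitidx = bitidx / BITS_PER_LONG;

          bitidx &= BITS_PER_LONG - 1;
          printf("pfn %#lx -> word %lu, bit %lu\n",
                 pfn, word_bitidx, bitidx);
          return 0;
  }

This prints "pfn 0x12345 -> word 1, bit 4": the pfn falls in the
eighteenth pageblock of its section, whose four flag bits start at bit
68, i.e. bit 4 of word 1 of the section's bitmap.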
Signed-off-by: Mel Gorman
---
 mm/page_alloc.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ab16560b76e6..d00847bb1612 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6759,23 +6759,23 @@ void *__init alloc_large_system_hash(const char *tablename,
 }
 
 /* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
+static inline unsigned long *get_pageblock_bitmap(struct page *page,
 					unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
 	return __pfn_to_section(pfn)->pageblock_flags;
 #else
-	return zone->pageblock_flags;
+	return page_zone(page)->pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 }
 
-static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
 	pfn &= (PAGES_PER_SECTION-1);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
+	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
@@ -6793,14 +6793,12 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 					unsigned long end_bitidx,
 					unsigned long mask)
 {
-	struct zone *zone;
 	unsigned long *bitmap;
 	unsigned long bitidx, word_bitidx;
 	unsigned long word;
 
-	zone = page_zone(page);
-	bitmap = get_pageblock_bitmap(zone, pfn);
-	bitidx = pfn_to_bitidx(zone, pfn);
+	bitmap = get_pageblock_bitmap(page, pfn);
+	bitidx = pfn_to_bitidx(page, pfn);
 	word_bitidx = bitidx / BITS_PER_LONG;
 	bitidx &= (BITS_PER_LONG-1);
 
@@ -6822,20 +6820,18 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 					unsigned long end_bitidx,
 					unsigned long mask)
 {
-	struct zone *zone;
 	unsigned long *bitmap;
 	unsigned long bitidx, word_bitidx;
 	unsigned long old_word, word;
 
 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
 
-	zone = page_zone(page);
-	bitmap = get_pageblock_bitmap(zone, pfn);
-	bitidx = pfn_to_bitidx(zone, pfn);
+	bitmap = get_pageblock_bitmap(page, pfn);
+	bitidx = pfn_to_bitidx(page, pfn);
 	word_bitidx = bitidx / BITS_PER_LONG;
 	bitidx &= (BITS_PER_LONG-1);
 
-	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
+	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
 
 	bitidx += end_bitidx;
 	mask <<= (BITS_PER_LONG - bitidx - 1);
-- 
2.6.4