From: Mel Gorman <mgorman@techsingularity.net> To: Andrew Morton <akpm@linux-foundation.org> Cc: Vlastimil Babka <vbabka@suse.cz>, Jesper Dangaard Brouer <brouer@redhat.com>, Linux-MM <linux-mm@kvack.org>, LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@techsingularity.net> Subject: [PATCH 27/28] mm, page_alloc: Defer debugging checks of freed pages until a PCP drain Date: Fri, 15 Apr 2016 10:07:54 +0100 [thread overview] Message-ID: <1460711275-1130-15-git-send-email-mgorman@techsingularity.net> (raw) In-Reply-To: <1460711275-1130-1-git-send-email-mgorman@techsingularity.net> Every page free checks a number of page fields for validity. This catches premature frees and corruptions but it is also expensive. This patch weakens the debugging check by checking PCP pages at the time they are drained from the PCP list. This will trigger the bug but the site that freed the corrupt page will be lost. To get the full context, a kernel rebuild with DEBUG_VM is necessary. Signed-off-by: Mel Gorman <mgorman@techsingularity.net> --- mm/page_alloc.c | 244 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 146 insertions(+), 98 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e63afe07c032..b5722790c846 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -939,6 +939,148 @@ static inline int free_pages_check(struct page *page) return 1; } +static int free_tail_pages_check(struct page *head_page, struct page *page) +{ + int ret = 1; + + /* + * We rely page->lru.next never has bit 0 set, unless the page + * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
+ */ + BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); + + if (!IS_ENABLED(CONFIG_DEBUG_VM)) { + ret = 0; + goto out; + } + switch (page - head_page) { + case 1: + /* the first tail page: ->mapping is compound_mapcount() */ + if (unlikely(compound_mapcount(page))) { + bad_page(page, "nonzero compound_mapcount", 0); + goto out; + } + break; + case 2: + /* + * the second tail page: ->mapping is + * page_deferred_list().next -- ignore value. + */ + break; + default: + if (page->mapping != TAIL_MAPPING) { + bad_page(page, "corrupted mapping in tail page", 0); + goto out; + } + break; + } + if (unlikely(!PageTail(page))) { + bad_page(page, "PageTail not set", 0); + goto out; + } + if (unlikely(compound_head(page) != head_page)) { + bad_page(page, "compound_head not consistent", 0); + goto out; + } + ret = 0; +out: + page->mapping = NULL; + clear_compound_head(page); + return ret; +} + +static bool free_pages_prepare(struct page *page, unsigned int order) +{ + int bad = 0; + + VM_BUG_ON_PAGE(PageTail(page), page); + + trace_mm_page_free(page, order); + kmemcheck_free_shadow(page, order); + kasan_free_pages(page, order); + + /* + * Check tail pages before head page information is cleared to + * avoid checking PageCompound for order-0 pages. 
+ */ + if (order) { + bool compound = PageCompound(page); + int i; + + VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); + + for (i = 1; i < (1 << order); i++) { + if (compound) + bad += free_tail_pages_check(page, page + i); + bad += free_pages_check(page + i); + } + } + if (PageAnonHead(page)) + page->mapping = NULL; + bad += free_pages_check(page); + if (bad) + return false; + + reset_page_owner(page, order); + + if (!PageHighMem(page)) { + debug_check_no_locks_freed(page_address(page), + PAGE_SIZE << order); + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } + arch_free_page(page, order); + kernel_poison_pages(page, 1 << order, 0); + kernel_map_pages(page, 1 << order, 0); + + return true; +} + +#ifdef CONFIG_DEBUG_VM +static inline bool free_pcp_prepare(struct page *page) +{ + return free_pages_prepare(page, 0); +} + +static inline bool bulkfree_pcp_prepare(struct page *page) +{ + return false; +} +#else +static bool free_pcp_prepare(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + + trace_mm_page_free(page, 0); + kmemcheck_free_shadow(page, 0); + kasan_free_pages(page, 0); + + if (PageAnonHead(page)) + page->mapping = NULL; + + reset_page_owner(page, 0); + + if (!PageHighMem(page)) { + debug_check_no_locks_freed(page_address(page), + PAGE_SIZE); + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE); + } + arch_free_page(page, 0); + kernel_poison_pages(page, 0, 0); + kernel_map_pages(page, 0, 0); + + page_cpupid_reset_last(page); + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + return true; +} + +static bool bulkfree_pcp_prepare(struct page *page) +{ + return free_pages_check(page); +} +#endif /* CONFIG_DEBUG_VM */ + /* * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. 
@@ -999,6 +1141,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (unlikely(isolated_pageblocks)) mt = get_pageblock_migratetype(page); + if (bulkfree_pcp_prepare(page)) + continue; + __free_one_page(page, page_to_pfn(page), zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); } while (--count && --batch_free && !list_empty(list)); @@ -1025,56 +1170,6 @@ static void free_one_page(struct zone *zone, spin_unlock(&zone->lock); } -static int free_tail_pages_check(struct page *head_page, struct page *page) -{ - int ret = 1; - - /* - * We rely page->lru.next never has bit 0 set, unless the page - * is PageTail(). Let's make sure that's true even for poisoned ->lru. - */ - BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); - - if (!IS_ENABLED(CONFIG_DEBUG_VM)) { - ret = 0; - goto out; - } - switch (page - head_page) { - case 1: - /* the first tail page: ->mapping is compound_mapcount() */ - if (unlikely(compound_mapcount(page))) { - bad_page(page, "nonzero compound_mapcount", 0); - goto out; - } - break; - case 2: - /* - * the second tail page: ->mapping is - * page_deferred_list().next -- ignore value. 
- */ - break; - default: - if (page->mapping != TAIL_MAPPING) { - bad_page(page, "corrupted mapping in tail page", 0); - goto out; - } - break; - } - if (unlikely(!PageTail(page))) { - bad_page(page, "PageTail not set", 0); - goto out; - } - if (unlikely(compound_head(page) != head_page)) { - bad_page(page, "compound_head not consistent", 0); - goto out; - } - ret = 0; -out: - page->mapping = NULL; - clear_compound_head(page); - return ret; -} - static void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid) { @@ -1148,53 +1243,6 @@ void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) } } -static bool free_pages_prepare(struct page *page, unsigned int order) -{ - int bad = 0; - - VM_BUG_ON_PAGE(PageTail(page), page); - - trace_mm_page_free(page, order); - kmemcheck_free_shadow(page, order); - kasan_free_pages(page, order); - - /* - * Check tail pages before head page information is cleared to - * avoid checking PageCompound for order-0 pages. 
- */ - if (order) { - bool compound = PageCompound(page); - int i; - - VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); - - for (i = 1; i < (1 << order); i++) { - if (compound) - bad += free_tail_pages_check(page, page + i); - bad += free_pages_check(page + i); - } - } - if (PageAnonHead(page)) - page->mapping = NULL; - bad += free_pages_check(page); - if (bad) - return false; - - reset_page_owner(page, order); - - if (!PageHighMem(page)) { - debug_check_no_locks_freed(page_address(page), - PAGE_SIZE << order); - debug_check_no_obj_freed(page_address(page), - PAGE_SIZE << order); - } - arch_free_page(page, order); - kernel_poison_pages(page, 1 << order, 0); - kernel_map_pages(page, 1 << order, 0); - - return true; -} - static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; @@ -2327,7 +2375,7 @@ void free_hot_cold_page(struct page *page, bool cold) unsigned long pfn = page_to_pfn(page); int migratetype; - if (!free_pages_prepare(page, 0)) + if (!free_pcp_prepare(page)) return; migratetype = get_pfnblock_migratetype(page, pfn); -- 2.6.4
WARNING: multiple messages have this Message-ID (diff)
From: Mel Gorman <mgorman@techsingularity.net> To: Andrew Morton <akpm@linux-foundation.org> Cc: Vlastimil Babka <vbabka@suse.cz>, Jesper Dangaard Brouer <brouer@redhat.com>, Linux-MM <linux-mm@kvack.org>, LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@techsingularity.net> Subject: [PATCH 27/28] mm, page_alloc: Defer debugging checks of freed pages until a PCP drain Date: Fri, 15 Apr 2016 10:07:54 +0100 [thread overview] Message-ID: <1460711275-1130-15-git-send-email-mgorman@techsingularity.net> (raw) In-Reply-To: <1460711275-1130-1-git-send-email-mgorman@techsingularity.net> Every page free checks a number of page fields for validity. This catches premature frees and corruptions but it is also expensive. This patch weakens the debugging check by checking PCP pages at the time they are drained from the PCP list. This will trigger the bug but the site that freed the corrupt page will be lost. To get the full context, a kernel rebuild with DEBUG_VM is necessary. Signed-off-by: Mel Gorman <mgorman@techsingularity.net> --- mm/page_alloc.c | 244 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 146 insertions(+), 98 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e63afe07c032..b5722790c846 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -939,6 +939,148 @@ static inline int free_pages_check(struct page *page) return 1; } +static int free_tail_pages_check(struct page *head_page, struct page *page) +{ + int ret = 1; + + /* + * We rely page->lru.next never has bit 0 set, unless the page + * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
+ */ + BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); + + if (!IS_ENABLED(CONFIG_DEBUG_VM)) { + ret = 0; + goto out; + } + switch (page - head_page) { + case 1: + /* the first tail page: ->mapping is compound_mapcount() */ + if (unlikely(compound_mapcount(page))) { + bad_page(page, "nonzero compound_mapcount", 0); + goto out; + } + break; + case 2: + /* + * the second tail page: ->mapping is + * page_deferred_list().next -- ignore value. + */ + break; + default: + if (page->mapping != TAIL_MAPPING) { + bad_page(page, "corrupted mapping in tail page", 0); + goto out; + } + break; + } + if (unlikely(!PageTail(page))) { + bad_page(page, "PageTail not set", 0); + goto out; + } + if (unlikely(compound_head(page) != head_page)) { + bad_page(page, "compound_head not consistent", 0); + goto out; + } + ret = 0; +out: + page->mapping = NULL; + clear_compound_head(page); + return ret; +} + +static bool free_pages_prepare(struct page *page, unsigned int order) +{ + int bad = 0; + + VM_BUG_ON_PAGE(PageTail(page), page); + + trace_mm_page_free(page, order); + kmemcheck_free_shadow(page, order); + kasan_free_pages(page, order); + + /* + * Check tail pages before head page information is cleared to + * avoid checking PageCompound for order-0 pages. 
+ */ + if (order) { + bool compound = PageCompound(page); + int i; + + VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); + + for (i = 1; i < (1 << order); i++) { + if (compound) + bad += free_tail_pages_check(page, page + i); + bad += free_pages_check(page + i); + } + } + if (PageAnonHead(page)) + page->mapping = NULL; + bad += free_pages_check(page); + if (bad) + return false; + + reset_page_owner(page, order); + + if (!PageHighMem(page)) { + debug_check_no_locks_freed(page_address(page), + PAGE_SIZE << order); + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } + arch_free_page(page, order); + kernel_poison_pages(page, 1 << order, 0); + kernel_map_pages(page, 1 << order, 0); + + return true; +} + +#ifdef CONFIG_DEBUG_VM +static inline bool free_pcp_prepare(struct page *page) +{ + return free_pages_prepare(page, 0); +} + +static inline bool bulkfree_pcp_prepare(struct page *page) +{ + return false; +} +#else +static bool free_pcp_prepare(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + + trace_mm_page_free(page, 0); + kmemcheck_free_shadow(page, 0); + kasan_free_pages(page, 0); + + if (PageAnonHead(page)) + page->mapping = NULL; + + reset_page_owner(page, 0); + + if (!PageHighMem(page)) { + debug_check_no_locks_freed(page_address(page), + PAGE_SIZE); + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE); + } + arch_free_page(page, 0); + kernel_poison_pages(page, 0, 0); + kernel_map_pages(page, 0, 0); + + page_cpupid_reset_last(page); + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + return true; +} + +static bool bulkfree_pcp_prepare(struct page *page) +{ + return free_pages_check(page); +} +#endif /* CONFIG_DEBUG_VM */ + /* * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. 
@@ -999,6 +1141,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (unlikely(isolated_pageblocks)) mt = get_pageblock_migratetype(page); + if (bulkfree_pcp_prepare(page)) + continue; + __free_one_page(page, page_to_pfn(page), zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); } while (--count && --batch_free && !list_empty(list)); @@ -1025,56 +1170,6 @@ static void free_one_page(struct zone *zone, spin_unlock(&zone->lock); } -static int free_tail_pages_check(struct page *head_page, struct page *page) -{ - int ret = 1; - - /* - * We rely page->lru.next never has bit 0 set, unless the page - * is PageTail(). Let's make sure that's true even for poisoned ->lru. - */ - BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); - - if (!IS_ENABLED(CONFIG_DEBUG_VM)) { - ret = 0; - goto out; - } - switch (page - head_page) { - case 1: - /* the first tail page: ->mapping is compound_mapcount() */ - if (unlikely(compound_mapcount(page))) { - bad_page(page, "nonzero compound_mapcount", 0); - goto out; - } - break; - case 2: - /* - * the second tail page: ->mapping is - * page_deferred_list().next -- ignore value. 
- */ - break; - default: - if (page->mapping != TAIL_MAPPING) { - bad_page(page, "corrupted mapping in tail page", 0); - goto out; - } - break; - } - if (unlikely(!PageTail(page))) { - bad_page(page, "PageTail not set", 0); - goto out; - } - if (unlikely(compound_head(page) != head_page)) { - bad_page(page, "compound_head not consistent", 0); - goto out; - } - ret = 0; -out: - page->mapping = NULL; - clear_compound_head(page); - return ret; -} - static void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid) { @@ -1148,53 +1243,6 @@ void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) } } -static bool free_pages_prepare(struct page *page, unsigned int order) -{ - int bad = 0; - - VM_BUG_ON_PAGE(PageTail(page), page); - - trace_mm_page_free(page, order); - kmemcheck_free_shadow(page, order); - kasan_free_pages(page, order); - - /* - * Check tail pages before head page information is cleared to - * avoid checking PageCompound for order-0 pages. 
- */ - if (order) { - bool compound = PageCompound(page); - int i; - - VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); - - for (i = 1; i < (1 << order); i++) { - if (compound) - bad += free_tail_pages_check(page, page + i); - bad += free_pages_check(page + i); - } - } - if (PageAnonHead(page)) - page->mapping = NULL; - bad += free_pages_check(page); - if (bad) - return false; - - reset_page_owner(page, order); - - if (!PageHighMem(page)) { - debug_check_no_locks_freed(page_address(page), - PAGE_SIZE << order); - debug_check_no_obj_freed(page_address(page), - PAGE_SIZE << order); - } - arch_free_page(page, order); - kernel_poison_pages(page, 1 << order, 0); - kernel_map_pages(page, 1 << order, 0); - - return true; -} - static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; @@ -2327,7 +2375,7 @@ void free_hot_cold_page(struct page *page, bool cold) unsigned long pfn = page_to_pfn(page); int migratetype; - if (!free_pages_prepare(page, 0)) + if (!free_pcp_prepare(page)) return; migratetype = get_pfnblock_migratetype(page, pfn); -- 2.6.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2016-04-15 9:10 UTC|newest] Thread overview: 160+ messages / expand[flat|nested] mbox.gz Atom feed top 2016-04-15 8:58 [PATCH 00/28] Optimise page alloc/free fast paths v3 Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-15 8:58 ` [PATCH 01/28] mm, page_alloc: Only check PageCompound for high-order pages Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-25 9:33 ` Vlastimil Babka 2016-04-25 9:33 ` Vlastimil Babka 2016-04-26 10:33 ` Mel Gorman 2016-04-26 10:33 ` Mel Gorman 2016-04-26 11:20 ` Vlastimil Babka 2016-04-26 11:20 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 02/28] mm, page_alloc: Use new PageAnonHead helper in the free page fast path Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-25 9:56 ` Vlastimil Babka 2016-04-25 9:56 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 03/28] mm, page_alloc: Reduce branches in zone_statistics Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-25 11:15 ` Vlastimil Babka 2016-04-25 11:15 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 04/28] mm, page_alloc: Inline zone_statistics Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-25 11:17 ` Vlastimil Babka 2016-04-25 11:17 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 05/28] mm, page_alloc: Inline the fast path of the zonelist iterator Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-25 14:50 ` Vlastimil Babka 2016-04-25 14:50 ` Vlastimil Babka 2016-04-26 10:30 ` Mel Gorman 2016-04-26 10:30 ` Mel Gorman 2016-04-26 11:05 ` Vlastimil Babka 2016-04-26 11:05 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 06/28] mm, page_alloc: Use __dec_zone_state for order-0 page allocation Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-26 11:25 ` Vlastimil Babka 2016-04-26 11:25 ` Vlastimil Babka 2016-04-15 8:58 ` [PATCH 07/28] mm, page_alloc: Avoid unnecessary zone lookups during pageblock operations Mel Gorman 2016-04-15 8:58 ` Mel Gorman 2016-04-26 11:29 ` Vlastimil Babka 2016-04-26 11:29 ` Vlastimil Babka 2016-04-15 8:59 ` [PATCH 08/28] mm, page_alloc: Convert alloc_flags to 
unsigned Mel Gorman 2016-04-15 8:59 ` Mel Gorman 2016-04-26 11:31 ` Vlastimil Babka 2016-04-26 11:31 ` Vlastimil Babka 2016-04-15 8:59 ` [PATCH 09/28] mm, page_alloc: Convert nr_fair_skipped to bool Mel Gorman 2016-04-15 8:59 ` Mel Gorman 2016-04-26 11:37 ` Vlastimil Babka 2016-04-26 11:37 ` Vlastimil Babka 2016-04-15 8:59 ` [PATCH 10/28] mm, page_alloc: Remove unnecessary local variable in get_page_from_freelist Mel Gorman 2016-04-15 8:59 ` Mel Gorman 2016-04-26 11:38 ` Vlastimil Babka 2016-04-26 11:38 ` Vlastimil Babka 2016-04-15 8:59 ` [PATCH 11/28] mm, page_alloc: Remove unnecessary initialisation " Mel Gorman 2016-04-15 8:59 ` Mel Gorman 2016-04-26 11:39 ` Vlastimil Babka 2016-04-26 11:39 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 13/28] mm, page_alloc: Remove redundant check for empty zonelist Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-15 9:07 ` [PATCH 14/28] mm, page_alloc: Simplify last cpupid reset Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 13:30 ` Vlastimil Babka 2016-04-26 13:30 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 15/28] mm, page_alloc: Move might_sleep_if check to the allocator slowpath Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 13:41 ` Vlastimil Babka 2016-04-26 13:41 ` Vlastimil Babka 2016-04-26 14:50 ` Mel Gorman 2016-04-26 14:50 ` Mel Gorman 2016-04-26 15:16 ` Vlastimil Babka 2016-04-26 15:16 ` Vlastimil Babka 2016-04-26 16:29 ` Mel Gorman 2016-04-26 16:29 ` Mel Gorman 2016-04-15 9:07 ` [PATCH 16/28] mm, page_alloc: Move __GFP_HARDWALL modifications out of the fastpath Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 14:13 ` Vlastimil Babka 2016-04-26 14:13 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 17/28] mm, page_alloc: Check once if a zone has isolated pageblocks Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 14:27 ` Vlastimil Babka 2016-04-26 14:27 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 18/28] mm, page_alloc: Shorten the page allocator fast path Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 15:23 ` 
Vlastimil Babka 2016-04-26 15:23 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 19/28] mm, page_alloc: Reduce cost of fair zone allocation policy retry Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 17:24 ` Vlastimil Babka 2016-04-26 17:24 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 20/28] mm, page_alloc: Shortcut watermark checks for order-0 pages Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 17:32 ` Vlastimil Babka 2016-04-26 17:32 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 21/28] mm, page_alloc: Avoid looking up the first zone in a zonelist twice Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 17:46 ` Vlastimil Babka 2016-04-26 17:46 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 22/28] mm, page_alloc: Remove field from alloc_context Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-15 9:07 ` [PATCH 23/28] mm, page_alloc: Check multiple page fields with a single branch Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 18:41 ` Vlastimil Babka 2016-04-26 18:41 ` Vlastimil Babka 2016-04-27 10:07 ` Mel Gorman 2016-04-27 10:07 ` Mel Gorman 2016-04-15 9:07 ` [PATCH 24/28] mm, page_alloc: Remove unnecessary variable from free_pcppages_bulk Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 18:43 ` Vlastimil Babka 2016-04-26 18:43 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 25/28] mm, page_alloc: Inline pageblock lookup in page free fast paths Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 19:10 ` Vlastimil Babka 2016-04-26 19:10 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 26/28] cpuset: use static key better and convert to new API Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-26 19:49 ` Vlastimil Babka 2016-04-26 19:49 ` Vlastimil Babka 2016-04-15 9:07 ` Mel Gorman [this message] 2016-04-15 9:07 ` [PATCH 27/28] mm, page_alloc: Defer debugging checks of freed pages until a PCP drain Mel Gorman 2016-04-27 11:59 ` Vlastimil Babka 2016-04-27 11:59 ` Vlastimil Babka 2016-04-27 12:01 ` [PATCH 1/3] mm, page_alloc: un-inline the bad part of free_pages_check Vlastimil 
Babka 2016-04-27 12:01 ` Vlastimil Babka 2016-04-27 12:01 ` [PATCH 2/3] mm, page_alloc: pull out side effects from free_pages_check Vlastimil Babka 2016-04-27 12:01 ` Vlastimil Babka 2016-04-27 12:41 ` Mel Gorman 2016-04-27 12:41 ` Mel Gorman 2016-04-27 13:00 ` Vlastimil Babka 2016-04-27 13:00 ` Vlastimil Babka 2016-04-27 12:01 ` [PATCH 3/3] mm, page_alloc: don't duplicate code in free_pcp_prepare Vlastimil Babka 2016-04-27 12:01 ` Vlastimil Babka 2016-04-27 12:37 ` [PATCH 1/3] mm, page_alloc: un-inline the bad part of free_pages_check Mel Gorman 2016-04-27 12:37 ` Mel Gorman 2016-04-27 12:53 ` Vlastimil Babka 2016-04-27 12:53 ` Vlastimil Babka 2016-04-15 9:07 ` [PATCH 28/28] mm, page_alloc: Defer debugging checks of pages allocated from the PCP Mel Gorman 2016-04-15 9:07 ` Mel Gorman 2016-04-27 14:06 ` Vlastimil Babka 2016-04-27 14:06 ` Vlastimil Babka 2016-04-27 15:31 ` Mel Gorman 2016-04-27 15:31 ` Mel Gorman 2016-05-17 6:41 ` Naoya Horiguchi 2016-05-17 6:41 ` Naoya Horiguchi 2016-05-18 7:51 ` Vlastimil Babka 2016-05-18 7:51 ` Vlastimil Babka 2016-05-18 7:55 ` Vlastimil Babka 2016-05-18 7:55 ` Vlastimil Babka 2016-05-18 8:49 ` Mel Gorman 2016-05-18 8:49 ` Mel Gorman 2016-04-26 12:04 ` [PATCH 13/28] mm, page_alloc: Remove redundant check for empty zonelist Vlastimil Babka 2016-04-26 12:04 ` Vlastimil Babka 2016-04-26 13:00 ` Mel Gorman 2016-04-26 13:00 ` Mel Gorman 2016-04-26 19:11 ` Andrew Morton 2016-04-26 19:11 ` Andrew Morton 2016-04-15 12:44 ` [PATCH 00/28] Optimise page alloc/free fast paths v3 Jesper Dangaard Brouer 2016-04-15 12:44 ` Jesper Dangaard Brouer 2016-04-15 13:08 ` Mel Gorman 2016-04-15 13:08 ` Mel Gorman 2016-04-16 7:21 ` [PATCH 12/28] mm, page_alloc: Remove unnecessary initialisation from __alloc_pages_nodemask() Mel Gorman 2016-04-16 7:21 ` Mel Gorman 2016-04-26 11:41 ` Vlastimil Babka 2016-04-26 11:41 ` Vlastimil Babka
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1460711275-1130-15-git-send-email-mgorman@techsingularity.net \ --to=mgorman@techsingularity.net \ --cc=akpm@linux-foundation.org \ --cc=brouer@redhat.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=vbabka@suse.cz \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.