From: Mel Gorman <mgorman@techsingularity.net>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Linux Kernel <linux-kernel@vger.kernel.org>,
	Linux-MM <linux-mm@kvack.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Hillf Danton <hillf.zj@alibaba-inc.com>,
	Jesper Dangaard Brouer <brouer@redhat.com>,
	Mel Gorman <mgorman@techsingularity.net>
Subject: [PATCH 1/4] mm, page_alloc: Split buffered_rmqueue
Date: Tue, 17 Jan 2017 09:29:51 +0000
Message-ID: <20170117092954.15413-2-mgorman@techsingularity.net>
In-Reply-To: <20170117092954.15413-1-mgorman@techsingularity.net>

buffered_rmqueue removes a page from a given zone and uses the per-cpu
list for order-0 allocations. This is fine, but a hypothetical caller
that wanted multiple order-0 pages would have to disable/re-enable
interrupts multiple times. This patch structures buffered_rmqueue such
that it's relatively easy to build a bulk order-0 page allocator. There
is no functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
---
 mm/page_alloc.c | 126 +++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 78 insertions(+), 48 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d604d2596b7b..0e8404e546f5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2602,73 +2602,103 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 #endif
 }
 
+/* Remove page from the per-cpu list, caller must protect the list */
+static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
+			bool cold, struct per_cpu_pages *pcp,
+			struct list_head *list)
+{
+	struct page *page;
+
+	do {
+		if (list_empty(list)) {
+			pcp->count += rmqueue_bulk(zone, 0,
+					pcp->batch, list,
+					migratetype, cold);
+			if (unlikely(list_empty(list)))
+				return NULL;
+		}
+
+		if (cold)
+			page = list_last_entry(list, struct page, lru);
+		else
+			page = list_first_entry(list, struct page, lru);
+
+		list_del(&page->lru);
+		pcp->count--;
+	} while (check_new_pcp(page));
+
+	return page;
+}
+
+/* Lock and remove page from the per-cpu list */
+static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+			struct zone *zone, unsigned int order,
+			gfp_t gfp_flags, int migratetype)
+{
+	struct per_cpu_pages *pcp;
+	struct list_head *list;
+	bool cold = ((gfp_flags & __GFP_COLD) != 0);
+	struct page *page;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+	list = &pcp->lists[migratetype];
+	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
+	if (page) {
+		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+		zone_statistics(preferred_zone, zone, gfp_flags);
+	}
+	local_irq_restore(flags);
+	return page;
+}
+
 /*
  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
 static inline
-struct page *buffered_rmqueue(struct zone *preferred_zone,
+struct page *rmqueue(struct zone *preferred_zone,
 			struct zone *zone, unsigned int order,
 			gfp_t gfp_flags, unsigned int alloc_flags,
 			int migratetype)
 {
 	unsigned long flags;
 	struct page *page;
-	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
 	if (likely(order == 0)) {
-		struct per_cpu_pages *pcp;
-		struct list_head *list;
-
-		local_irq_save(flags);
-		do {
-			pcp = &this_cpu_ptr(zone->pageset)->pcp;
-			list = &pcp->lists[migratetype];
-			if (list_empty(list)) {
-				pcp->count += rmqueue_bulk(zone, 0,
-						pcp->batch, list,
-						migratetype, cold);
-				if (unlikely(list_empty(list)))
-					goto failed;
-			}
-
-			if (cold)
-				page = list_last_entry(list, struct page, lru);
-			else
-				page = list_first_entry(list, struct page, lru);
-
-			list_del(&page->lru);
-			pcp->count--;
+		page = rmqueue_pcplist(preferred_zone, zone, order,
+				gfp_flags, migratetype);
+		goto out;
+	}
 
-		} while (check_new_pcp(page));
-	} else {
-		/*
-		 * We most definitely don't want callers attempting to
-		 * allocate greater than order-1 page units with __GFP_NOFAIL.
-		 */
-		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-		spin_lock_irqsave(&zone->lock, flags);
+	/*
+	 * We most definitely don't want callers attempting to
+	 * allocate greater than order-1 page units with __GFP_NOFAIL.
+	 */
+	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+	spin_lock_irqsave(&zone->lock, flags);
 
-		do {
-			page = NULL;
-			if (alloc_flags & ALLOC_HARDER) {
-				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
-				if (page)
-					trace_mm_page_alloc_zone_locked(page, order, migratetype);
-			}
-			if (!page)
-				page = __rmqueue(zone, order, migratetype);
-		} while (page && check_new_pages(page, order));
-		spin_unlock(&zone->lock);
+	do {
+		page = NULL;
+		if (alloc_flags & ALLOC_HARDER) {
+			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+			if (page)
+				trace_mm_page_alloc_zone_locked(page, order, migratetype);
+		}
 		if (!page)
-			goto failed;
-		__mod_zone_freepage_state(zone, -(1 << order),
-				get_pcppage_migratetype(page));
-	}
+			page = __rmqueue(zone, order, migratetype);
+	} while (page && check_new_pages(page, order));
+	spin_unlock(&zone->lock);
+	if (!page)
+		goto failed;
+	__mod_zone_freepage_state(zone, -(1 << order),
+			get_pcppage_migratetype(page));
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
 
+out:
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 	return page;
 
@@ -2974,7 +3004,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 	}
 
 try_this_zone:
-	page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
+	page = rmqueue(ac->preferred_zoneref->zone, zone, order,
 			gfp_mask, alloc_flags, ac->migratetype);
 	if (page) {
 		prep_new_page(page, order, gfp_mask, alloc_flags);
-- 
2.11.0
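
As context for the restructuring above: a bulk order-0 allocator built on
__rmqueue_pcplist() could look roughly like the sketch below. This is a
hypothetical illustration only, not part of this patch or series; the
function name rmqueue_bulk_pcplist, its signature, and the batch statistics
handling are assumptions made for the example. It shows the point of the
split: a whole batch of pages can be taken from the per-cpu list under a
single local_irq_save()/local_irq_restore() pair instead of one pair per
page.

/*
 * Hypothetical sketch (not part of this patch): allocate up to nr_pages
 * order-0 pages from the per-cpu lists onto alloc_list, disabling
 * interrupts once for the whole batch rather than once per page.
 */
static unsigned long rmqueue_bulk_pcplist(struct zone *preferred_zone,
			struct zone *zone, gfp_t gfp_flags,
			int migratetype, unsigned long nr_pages,
			struct list_head *alloc_list)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);
	unsigned long flags;
	unsigned long allocated = 0;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];

	while (allocated < nr_pages) {
		struct page *page;

		/* Refills from rmqueue_bulk() as needed, NULL when empty */
		page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
		if (!page)
			break;

		list_add_tail(&page->lru, alloc_list);
		allocated++;
	}

	/* Account the whole batch in one go, mirroring rmqueue_pcplist() */
	if (allocated) {
		__count_zid_vm_events(PGALLOC, zone_idx(zone), allocated);
		zone_statistics(preferred_zone, zone, gfp_flags);
	}
	local_irq_restore(flags);
	return allocated;
}

A caller would pass an empty list_head and splice the returned pages into
its own structures; if the per-cpu list and the buddy refill both run dry,
the function returns a short count, which is the natural failure mode for a
bulk interface.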