From: Marek Szyprowski <m.szyprowski@samsung.com>
To: linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-media@vger.kernel.org, linux-mm@kvack.org,
linaro-mm-sig@lists.linaro.org
Cc: Michal Nazarewicz <mina86@mina86.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Kyungmin Park <kyungmin.park@samsung.com>,
Russell King <linux@arm.linux.org.uk>,
Andrew Morton <akpm@linux-foundation.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Daniel Walker <dwalker@codeaurora.org>,
Mel Gorman <mel@csn.ul.ie>, Arnd Bergmann <arnd@arndb.de>,
Jesse Barker <jesse.barker@linaro.org>,
Jonathan Corbet <corbet@lwn.net>,
Shariq Hasnain <shariq.hasnain@linaro.org>,
Chunsang Jeong <chunsang.jeong@linaro.org>,
Dave Hansen <dave@linux.vnet.ibm.com>,
Benjamin Gaignard <benjamin.gaignard@linaro.org>
Subject: [PATCH 04/15] mm: compaction: introduce isolate_freepages_range()
Date: Thu, 26 Jan 2012 10:00:46 +0100 [thread overview]
Message-ID: <1327568457-27734-5-git-send-email-m.szyprowski@samsung.com> (raw)
In-Reply-To: <1327568457-27734-1-git-send-email-m.szyprowski@samsung.com>
From: Michal Nazarewicz <mina86@mina86.com>
This commit introduces isolate_freepages_range() function which
generalises isolate_freepages_block() so that it can be used on
arbitrary PFN ranges.
isolate_freepages_block() is left with only minor changes.
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
mm/compaction.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 100 insertions(+), 18 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index a42bbdd..63f82be 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -54,24 +54,20 @@ static unsigned long release_freepages(struct list_head *freelist)
return count;
}
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
- unsigned long blockpfn,
- struct list_head *freelist)
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+ unsigned long end_pfn,
+ struct list_head *freelist,
+ bool strict)
{
- unsigned long zone_end_pfn, end_pfn;
int nr_scanned = 0, total_isolated = 0;
struct page *cursor;
- /* Get the last PFN we should scan for free pages at */
- zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
- /* Find the first usable PFN in the block to initialse page cursor */
- for (; blockpfn < end_pfn; blockpfn++) {
- if (pfn_valid_within(blockpfn))
- break;
- }
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +75,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
int isolated, i;
struct page *page = cursor;
- if (!pfn_valid_within(blockpfn))
+ if (!pfn_valid_within(blockpfn)) {
+ if (strict)
+ return 0;
continue;
+ }
nr_scanned++;
- if (!PageBuddy(page))
+ if (!PageBuddy(page)) {
+ if (strict)
+ return 0;
continue;
+ }
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated && strict)
+ return 0;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -105,6 +109,80 @@ static unsigned long isolate_freepages_block(struct zone *zone,
return total_isolated;
}
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn: The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors, and cause the
+ * function to undo its actions and return zero.
+ *
+ * Otherwise, the function returns one-past-the-last PFN of the
+ * isolated page (which may be greater than end_pfn if the end fell
+ * in the middle of a free page).
+ */
+static unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long isolated, pfn, block_end_pfn, flags;
+ struct zone *zone = NULL;
+ LIST_HEAD(freelist);
+ struct page *page;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+ if (!pfn_valid(pfn))
+ break;
+
+ if (!zone)
+ zone = page_zone(pfn_to_page(pfn));
+ else if (zone != page_zone(pfn_to_page(pfn)))
+ break;
+
+ /*
+ * On subsequent iterations round_down() is actually not
+ * needed, but we keep it so as not to complicate the code.
+ */
+ block_end_pfn = round_down(pfn, pageblock_nr_pages)
+ + pageblock_nr_pages;
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
+ spin_lock_irqsave(&zone->lock, flags);
+ isolated = isolate_freepages_block(pfn, block_end_pfn,
+ &freelist, true);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ /*
+ * In strict mode, isolate_freepages_block() returns 0 if
+ * there are any holes in the block (ie. invalid PFNs or
+ * non-free pages).
+ */
+ if (!isolated)
+ break;
+
+ /*
+ * If we managed to isolate pages, it is always (1 << n) *
+ * pageblock_nr_pages for some non-negative n. (Max order
+ * page may span two pageblocks).
+ */
+ }
+
+ /* split_free_page does not map the pages */
+ list_for_each_entry(page, &freelist, lru) {
+ arch_alloc_page(page, 0);
+ kernel_map_pages(page, 1, 1);
+ }
+
+ if (pfn < end_pfn) {
+ /* Loop terminated early, cleanup. */
+ release_freepages(&freelist);
+ return 0;
+ }
+
+ /* We don't use freelists for anything. */
+ return pfn;
+}
+
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
@@ -135,7 +213,7 @@ static void isolate_freepages(struct zone *zone,
struct compact_control *cc)
{
struct page *page;
- unsigned long high_pfn, low_pfn, pfn;
+ unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
unsigned long flags;
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
@@ -155,6 +233,8 @@ static void isolate_freepages(struct zone *zone,
*/
high_pfn = min(low_pfn, pfn);
+ zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
/*
* Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate
@@ -191,7 +271,9 @@ static void isolate_freepages(struct zone *zone,
isolated = 0;
spin_lock_irqsave(&zone->lock, flags);
if (suitable_migration_target(page)) {
- isolated = isolate_freepages_block(zone, pfn, freelist);
+ end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+ isolated = isolate_freepages_block(pfn, end_pfn,
+ freelist, false);
nr_freepages += isolated;
}
spin_unlock_irqrestore(&zone->lock, flags);
--
1.7.1.569.g6f426
next prev parent reply other threads:[~2012-01-26 9:01 UTC|newest]
Thread overview: 62+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-01-26 9:00 [PATCHv19 00/15] Contiguous Memory Allocator Marek Szyprowski
2012-01-26 9:00 ` [PATCH 01/15] mm: page_alloc: remove trailing whitespace Marek Szyprowski
2012-01-30 10:59 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 02/15] mm: page_alloc: update migrate type of pages on pcp when isolating Marek Szyprowski
2012-01-30 11:15 ` Mel Gorman
2012-01-30 15:41 ` Michal Nazarewicz
2012-01-30 16:14 ` Mel Gorman
2012-01-31 16:23 ` Marek Szyprowski
2012-02-02 12:47 ` Mel Gorman
2012-02-02 19:53 ` Michal Nazarewicz
2012-02-03 9:31 ` Marek Szyprowski
2012-02-03 11:27 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 03/15] mm: compaction: introduce isolate_migratepages_range() Marek Szyprowski
2012-01-30 11:24 ` Mel Gorman
2012-01-30 12:42 ` Michal Nazarewicz
2012-01-30 13:25 ` Mel Gorman
2012-01-26 9:00 ` Marek Szyprowski [this message]
2012-01-30 11:48 ` [PATCH 04/15] mm: compaction: introduce isolate_freepages_range() Mel Gorman
2012-01-30 11:55 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 05/15] mm: compaction: export some of the functions Marek Szyprowski
2012-01-30 11:57 ` Mel Gorman
2012-01-30 12:33 ` Michal Nazarewicz
2012-01-26 9:00 ` [PATCH 06/15] mm: page_alloc: introduce alloc_contig_range() Marek Szyprowski
2012-01-30 12:11 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 07/15] mm: page_alloc: change fallbacks array handling Marek Szyprowski
2012-01-30 12:12 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 08/15] mm: mmzone: MIGRATE_CMA migration type added Marek Szyprowski
2012-01-30 12:35 ` Mel Gorman
2012-01-30 13:06 ` Michal Nazarewicz
2012-01-30 14:52 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 09/15] mm: page_isolation: MIGRATE_CMA isolation functions added Marek Szyprowski
2012-01-26 9:00 ` [PATCH 10/15] mm: extract reclaim code from __alloc_pages_direct_reclaim() Marek Szyprowski
2012-01-30 12:42 ` Mel Gorman
2012-01-26 9:00 ` [PATCH 11/15] mm: trigger page reclaim in alloc_contig_range() to stabilize watermarks Marek Szyprowski
2012-01-30 13:05 ` Mel Gorman
2012-01-31 17:15 ` Marek Szyprowski
2012-01-26 9:00 ` [PATCH 12/15] drivers: add Contiguous Memory Allocator Marek Szyprowski
2012-01-27 9:44 ` [Linaro-mm-sig] " Ohad Ben-Cohen
2012-01-27 10:53 ` Marek Szyprowski
2012-01-27 14:27 ` Clark, Rob
2012-01-27 14:51 ` Marek Szyprowski
2012-01-27 14:59 ` Ohad Ben-Cohen
2012-01-27 15:17 ` Marek Szyprowski
2012-01-28 18:57 ` Ohad Ben-Cohen
2012-01-30 7:43 ` Marek Szyprowski
2012-01-30 9:16 ` Ohad Ben-Cohen
2012-01-27 14:56 ` Ohad Ben-Cohen
2012-01-26 9:00 ` [PATCH 13/15] X86: integrate CMA with DMA-mapping subsystem Marek Szyprowski
2012-01-26 9:00 ` [PATCH 14/15] ARM: " Marek Szyprowski
2012-01-26 9:00 ` [PATCH 15/15] ARM: Samsung: use CMA for 2 memory banks for s5p-mfc device Marek Szyprowski
2012-01-26 15:31 ` [PATCHv19 00/15] Contiguous Memory Allocator Arnd Bergmann
2012-01-26 15:38 ` Michal Nazarewicz
2012-01-26 15:48 ` Marek Szyprowski
2012-01-28 0:26 ` Andrew Morton
2012-01-29 18:09 ` Rob Clark
2012-01-29 20:32 ` Anca Emanuel
2012-01-29 20:51 ` Arnd Bergmann
2012-01-30 13:25 ` Mel Gorman
2012-01-30 15:43 ` Michal Nazarewicz
[not found] ` <CA+M3ks7h1t6DbPSAhPN6LJ5Dw84hSukfWG16avh2eZL+o4caJg@mail.gmail.com>
2012-02-01 8:47 ` Marek Szyprowski
2012-02-10 18:10 ` Marek Szyprowski
2012-02-03 12:18 [PATCHv20 " Marek Szyprowski
2012-02-03 12:18 ` [PATCH 04/15] mm: compaction: introduce isolate_freepages_range() Marek Szyprowski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1327568457-27734-5-git-send-email-m.szyprowski@samsung.com \
--to=m.szyprowski@samsung.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=benjamin.gaignard@linaro.org \
--cc=chunsang.jeong@linaro.org \
--cc=corbet@lwn.net \
--cc=dave@linux.vnet.ibm.com \
--cc=dwalker@codeaurora.org \
--cc=jesse.barker@linaro.org \
--cc=kamezawa.hiroyu@jp.fujitsu.com \
--cc=kyungmin.park@samsung.com \
--cc=linaro-mm-sig@lists.linaro.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-media@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@arm.linux.org.uk \
--cc=mel@csn.ul.ie \
--cc=mina86@mina86.com \
--cc=shariq.hasnain@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).