From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753658Ab2HXEZD (ORCPT ); Fri, 24 Aug 2012 00:25:03 -0400 Received: from LGEMRELSE1Q.lge.com ([156.147.1.111]:42415 "EHLO LGEMRELSE1Q.lge.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752497Ab2HXEZA (ORCPT ); Fri, 24 Aug 2012 00:25:00 -0400 X-AuditID: 9c93016f-b7cc0ae000000e9f-b6-5037021af2a2 From: Minchan Kim To: Andrew Morton Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, Minchan Kim , Marek Szyprowski , Michal Nazarewicz , Rik van Riel , Mel Gorman Subject: [RFC] mm: support MIGRATE_DISCARD Date: Fri, 24 Aug 2012 13:25:30 +0900 Message-Id: <1345782330-23234-1-git-send-email-minchan@kernel.org> X-Mailer: git-send-email 1.7.9.5 X-Brightmail-Tracker: AAAAAA== Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org This patch introduces MIGRATE_DISCARD mode in migration. It drops *unmapped clean cache pages* instead of migrating them so that migration latency could be reduced by avoiding (memcpy + page remapping). It's useful for CMA because the latency of migration matters more than the eviction of background processes' working set. In addition, it needs fewer free pages for migration targets so it could avoid memory reclaim to get free pages, which is another factor that increases latency. 
Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Rik van Riel Cc: Mel Gorman Signed-off-by: Minchan Kim --- include/linux/migrate_mode.h | 11 ++++++--- mm/migrate.c | 56 ++++++++++++++++++++++++++++++++++-------- mm/page_alloc.c | 2 +- 3 files changed, 55 insertions(+), 14 deletions(-) diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h index ebf3d89..8e44e30 100644 --- a/include/linux/migrate_mode.h +++ b/include/linux/migrate_mode.h @@ -6,11 +6,16 @@ * on most operations but not ->writepage as the potential stall time * is too significant * MIGRATE_SYNC will block when migrating pages + * MIGRATE_DISCARD will discard clean cache page instead of migration + * + * MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC shouldn't be used + * together with OR flag. */ enum migrate_mode { - MIGRATE_ASYNC, - MIGRATE_SYNC_LIGHT, - MIGRATE_SYNC, + MIGRATE_ASYNC = 1 << 0, + MIGRATE_SYNC_LIGHT = 1 << 1, + MIGRATE_SYNC = 1 << 2, + MIGRATE_DISCARD = 1 << 3, }; #endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/mm/migrate.c b/mm/migrate.c index 77ed2d7..90be7a9 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -225,7 +225,7 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, struct buffer_head *bh = head; /* Simple case, sync compaction */ - if (mode != MIGRATE_ASYNC) { + if (!(mode & MIGRATE_ASYNC)) { do { get_bh(bh); lock_buffer(bh); @@ -313,7 +313,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, * the mapping back due to an elevated page count, we would have to * block waiting on other references to be dropped. */ - if (mode == MIGRATE_ASYNC && head && + if (mode & MIGRATE_ASYNC && head && !buffer_migrate_lock_buffers(head, mode)) { page_unfreeze_refs(page, expected_count); spin_unlock_irq(&mapping->tree_lock); @@ -521,7 +521,7 @@ int buffer_migrate_page(struct address_space *mapping, * with an IRQ-safe spinlock held. 
In the sync case, the buffers * need to be locked now */ - if (mode != MIGRATE_ASYNC) + if (!(mode & MIGRATE_ASYNC)) BUG_ON(!buffer_migrate_lock_buffers(head, mode)); ClearPagePrivate(page); @@ -603,7 +603,7 @@ static int fallback_migrate_page(struct address_space *mapping, { if (PageDirty(page)) { /* Only writeback pages in full synchronous migration */ - if (mode != MIGRATE_SYNC) + if (!(mode & MIGRATE_SYNC)) return -EBUSY; return writeout(mapping, page); } @@ -678,6 +678,19 @@ static int move_to_new_page(struct page *newpage, struct page *page, return rc; } +static int discard_page(struct page *page) +{ + int ret = -EAGAIN; + + struct address_space *mapping = page_mapping(page); + if (page_has_private(page)) + if (!try_to_release_page(page, GFP_KERNEL)) + return ret; + if (remove_mapping(mapping, page)) + ret = 0; + return ret; +} + static int __unmap_and_move(struct page *page, struct page *newpage, int force, bool offlining, enum migrate_mode mode) { @@ -685,9 +698,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage, int remap_swapcache = 1; struct mem_cgroup *mem; struct anon_vma *anon_vma = NULL; + enum ttu_flags ttu_flags; + bool discard_mode = false; + bool file = false; if (!trylock_page(page)) { - if (!force || mode == MIGRATE_ASYNC) + if (!force || mode & MIGRATE_ASYNC) goto out; /* @@ -733,7 +749,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, * the retry loop is too short and in the sync-light case, * the overhead of stalling is too much */ - if (mode != MIGRATE_SYNC) { + if (!(mode & MIGRATE_SYNC)) { rc = -EBUSY; goto uncharge; } @@ -799,12 +815,32 @@ static int __unmap_and_move(struct page *page, struct page *newpage, goto skip_unmap; } + file = page_is_file_cache(page); + ttu_flags = TTU_IGNORE_ACCESS; +retry: + if (!(mode & MIGRATE_DISCARD) || !file || PageDirty(page)) + ttu_flags |= (TTU_MIGRATION | TTU_IGNORE_MLOCK); + else + discard_mode = true; + /* Establish migration ptes or remove ptes */ - 
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + rc = try_to_unmap(page, ttu_flags); skip_unmap: - if (!page_mapped(page)) - rc = move_to_new_page(newpage, page, remap_swapcache, mode); + if (rc == SWAP_SUCCESS) { + if (!discard_mode) + rc = move_to_new_page(newpage, page, + remap_swapcache, mode); + else { + + rc = discard_page(page); + goto uncharge; + } + } else if (rc == SWAP_MLOCK && discard_mode) { + mode &= ~MIGRATE_DISCARD; + discard_mode = false; + goto retry; + } if (rc && remap_swapcache) remove_migration_ptes(page, page); @@ -907,7 +943,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, rc = -EAGAIN; if (!trylock_page(hpage)) { - if (!force || mode != MIGRATE_SYNC) + if (!force || !(mode & MIGRATE_SYNC)) goto out; lock_page(hpage); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ba3100a..e14b960 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5670,7 +5670,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) ret = migrate_pages(&cc.migratepages, __alloc_contig_migrate_alloc, - 0, false, MIGRATE_SYNC); + 0, false, MIGRATE_SYNC|MIGRATE_DISCARD); } putback_lru_pages(&cc.migratepages); -- 1.7.9.5