From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752790Ab2BTRXo (ORCPT ); Mon, 20 Feb 2012 12:23:44 -0500 Received: from mail-bk0-f46.google.com ([209.85.214.46]:43752 "EHLO mail-bk0-f46.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752544Ab2BTRXm (ORCPT ); Mon, 20 Feb 2012 12:23:42 -0500 Authentication-Results: mr.google.com; spf=pass (google.com: domain of koct9i@gmail.com designates 10.204.129.71 as permitted sender) smtp.mail=koct9i@gmail.com; dkim=pass header.i=koct9i@gmail.com Subject: [PATCH v2 17/22] mm: handle lruvec relocks on lumpy reclaim To: linux-mm@kvack.org, Andrew Morton , linux-kernel@vger.kernel.org From: Konstantin Khlebnikov Cc: Hugh Dickins , KAMEZAWA Hiroyuki Date: Mon, 20 Feb 2012 21:23:39 +0400 Message-ID: <20120220172339.22196.87364.stgit@zurg> In-Reply-To: <20120220171138.22196.65847.stgit@zurg> References: <20120220171138.22196.65847.stgit@zurg> User-Agent: StGit/0.15 MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 7bit Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Prepare for lock splitting in lumpy reclaim logic. Now move_active_pages_to_lru() and putback_inactive_pages() can put pages into different lruvecs. 
* relock book before SetPageLRU() * update reclaim_stat pointer after relocks * return currently locked lruvec Signed-off-by: Konstantin Khlebnikov --- mm/vmscan.c | 48 ++++++++++++++++++++++++++++++++++-------------- 1 files changed, 34 insertions(+), 14 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 4dba1df..39b4525 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1120,6 +1120,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, unsigned long *nr_scanned, struct scan_control *sc, isolate_mode_t mode, int active, int file) { + struct lruvec *cursor_lruvec = lruvec; struct list_head *src; unsigned long nr_taken = 0; unsigned long nr_lumpy_taken = 0; @@ -1203,14 +1204,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, !PageSwapCache(cursor_page)) break; + /* Switch cursor_lruvec lock for lumpy isolate */ + if (!catch_page_lruvec(&cursor_lruvec, cursor_page)) + continue; + if (__isolate_lru_page(cursor_page, mode, file) == 0) { unsigned int isolated_pages; - struct lruvec *cursor_lruvec; int cursor_lru = page_lru(cursor_page); list_move(&cursor_page->lru, dst); isolated_pages = hpage_nr_pages(cursor_page); - cursor_lruvec = page_lruvec(cursor_page); cursor_lruvec->pages_count[cursor_lru] -= isolated_pages; VM_BUG_ON((long)cursor_lruvec-> @@ -1241,6 +1244,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, } } + /* Restore original lruvec lock */ + cursor_lruvec = __relock_page_lruvec(cursor_lruvec, page); + /* If we break out of the loop above, lumpy reclaim failed */ if (pfn < end_pfn) nr_lumpy_failed++; @@ -1331,7 +1337,10 @@ static int too_many_isolated(struct zone *zone, int file, return isolated > inactive; } -static noinline_for_stack void +/* + * Returns currently locked lruvec + */ +static noinline_for_stack struct lruvec * putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) { @@ -1353,11 +1362,14 @@ putback_inactive_pages(struct lruvec *lruvec, lock_lruvec_irq(lruvec); 
continue; } + + /* can differ only on lumpy reclaim */ + lruvec = __relock_page_lruvec(lruvec, page); + reclaim_stat = &lruvec->reclaim_stat; + SetPageLRU(page); lru = page_lru(page); - /* can differ only on lumpy reclaim */ - lruvec = page_lruvec(page); add_page_to_lru_list(lruvec, page, lru); if (is_active_lru(lru)) { int file = is_file_lru(lru); @@ -1382,6 +1394,8 @@ putback_inactive_pages(struct lruvec *lruvec, * To save our caller's stack, now use input list for pages to free. */ list_splice(&pages_to_free, page_list); + + return lruvec; } static noinline_for_stack void @@ -1551,7 +1565,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, __count_vm_events(KSWAPD_STEAL, nr_reclaimed); __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); - putback_inactive_pages(lruvec, &page_list); + lruvec = putback_inactive_pages(lruvec, &page_list); __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); @@ -1610,12 +1624,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. 
+ * + * Returns currently locked lruvec */ -static void move_active_pages_to_lru(struct lruvec *lruvec, - struct list_head *list, - struct list_head *pages_to_free, - enum lru_list lru) +static struct lruvec * +move_active_pages_to_lru(struct lruvec *lruvec, + struct list_head *list, + struct list_head *pages_to_free, + enum lru_list lru) { unsigned long pgmoved = 0; struct page *page; @@ -1637,11 +1654,12 @@ static void move_active_pages_to_lru(struct lruvec *lruvec, page = lru_to_page(list); + /* can differ only on lumpy reclaim */ + lruvec = __relock_page_lruvec(lruvec, page); + VM_BUG_ON(PageLRU(page)); SetPageLRU(page); - /* can differ only on lumpy reclaim */ - lruvec = page_lruvec(page); list_move(&page->lru, &lruvec->pages_lru[lru]); numpages = hpage_nr_pages(page); lruvec->pages_count[lru] += numpages; @@ -1663,6 +1681,8 @@ static void move_active_pages_to_lru(struct lruvec *lruvec, __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, pgmoved); if (!is_active_lru(lru)) __count_vm_events(PGDEACTIVATE, pgmoved); + + return lruvec; } static void shrink_active_list(unsigned long nr_to_scan, @@ -1751,9 +1771,9 @@ static void shrink_active_list(unsigned long nr_to_scan, */ reclaim_stat->recent_rotated[file] += nr_rotated; - move_active_pages_to_lru(lruvec, &l_active, &l_hold, + lruvec = move_active_pages_to_lru(lruvec, &l_active, &l_hold, LRU_ACTIVE + file * LRU_FILE); - move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, + lruvec = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, LRU_BASE + file * LRU_FILE); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); unlock_lruvec_irq(lruvec);