From: Alex Shi <alex.shi@linux.alibaba.com>
To: cgroups@vger.kernel.org, akpm@linux-foundation.org,
	mgorman@techsingularity.net, tj@kernel.org, hughd@google.com,
	khlebnikov@yandex-team.ru, daniel.m.jordan@oracle.com,
	yang.shi@linux.alibaba.com, willy@infradead.org,
	hannes@cmpxchg.org, lkp@intel.com
Cc: Alex Shi <alex.shi@linux.alibaba.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v9 10/20] mm/lru: take PageLRU first in moving page between lru lists
Date: Mon,  2 Mar 2020 19:00:20 +0800
Message-ID: <1583146830-169516-11-git-send-email-alex.shi@linux.alibaba.com>
In-Reply-To: <1583146830-169516-1-git-send-email-alex.shi@linux.alibaba.com>

Currently the move_fn callbacks check PageLRU under lru_lock protection.
Moving a page consists of two parts: an lru isolation and an lru
re-adding. For the isolation part, take the lru bit with
TestClearPageLRU before calling move_fn; this adds an extra PageLRU
guard that blocks other isolation paths. Set the lru bit back after the
page has settled on its lru list.

This makes a successful TestClearPageLRU the necessary condition for
isolation when moving pages between lru lists.

check_move_unevictable_pages() also moves pages between lru lists, so
it needs to take PageLRU temporarily in the same way as move_fn; a
sketch of the pattern follows.
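
As a rough sketch of the guarded-move pattern (simplified from the diff
below: the per-node lru_lock batching is collapsed into per-page
locking, and the individual move_fn callbacks are elided):

	if (isolation && !TestClearPageLRU(page))
		continue;	/* lost the race; another path isolated it */

	spin_lock_irqsave(&pgdat->lru_lock, flags);
	/* remove the page from its old lru list, add it to the new one */
	(*move_fn)(page, lruvec, arg);
	if (isolation)
		SetPageLRU(page); /* page settled; visible to isolators again */
	spin_unlock_irqrestore(&pgdat->lru_lock, flags);

Since TestClearPageLRU is atomic, only one of any number of racing
paths can observe and clear the lru bit, so the page stays invisible
to other isolation attempts for the duration of the move.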

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
 mm/swap.c   | 42 ++++++++++++++++++++++++------------------
 mm/vmscan.c |  3 ++-
 2 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 8e71bdd04a1a..16af7c8369fe 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -187,7 +187,7 @@ int get_kernel_page(unsigned long start, int write, struct page **pages)
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
 	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
-	void *arg)
+	void *arg, bool isolation)
 {
 	int i;
 	struct pglist_data *pgdat = NULL;
@@ -198,6 +198,10 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 		struct page *page = pvec->pages[i];
 		struct pglist_data *pagepgdat = page_pgdat(page);
 
+		if (isolation && !TestClearPageLRU(page))
+			continue;
+
+		/* every page should be isolated from lru */
 		if (pagepgdat != pgdat) {
 			if (pgdat)
 				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
@@ -207,6 +211,9 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		(*move_fn)(page, lruvec, arg);
+
+		if (isolation)
+			SetPageLRU(page);
 	}
 	if (pgdat)
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
@@ -219,7 +226,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 {
 	int *pgmoved = arg;
 
-	if (PageLRU(page) && !PageUnevictable(page)) {
+	if (!PageUnevictable(page)) {
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
@@ -235,7 +242,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
 {
 	int pgmoved = 0;
 
-	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved, true);
 	__count_vm_events(PGROTATED, pgmoved);
 }
 
@@ -272,7 +279,7 @@ void update_page_reclaim_stat(struct lruvec *lruvec, int file, int rotated)
 static void __activate_page(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+	if (!PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
 
@@ -293,7 +300,7 @@ static void activate_page_drain(int cpu)
 	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
+		pagevec_lru_move_fn(pvec, __activate_page, NULL, true);
 }
 
 static bool need_activate_page_drain(int cpu)
@@ -309,7 +316,7 @@ void activate_page(struct page *page)
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+			pagevec_lru_move_fn(pvec, __activate_page, NULL, true);
 		put_cpu_var(activate_page_pvecs);
 	}
 }
@@ -501,9 +508,6 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 	int lru, file;
 	bool active;
 
-	if (!PageLRU(page))
-		return;
-
 	if (PageUnevictable(page))
 		return;
 
@@ -544,7 +548,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+	if (PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
 
@@ -561,7 +565,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	if (PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		bool active = PageActive(page);
 
@@ -607,15 +611,15 @@ void lru_add_drain_cpu(int cpu)
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL, true);
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL, true);
 
 	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL, true);
 
 	activate_page_drain(cpu);
 }
@@ -641,7 +645,8 @@ void deactivate_file_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+			pagevec_lru_move_fn(pvec,
+					lru_deactivate_file_fn, NULL, true);
 		put_cpu_var(lru_deactivate_file_pvecs);
 	}
 }
@@ -661,7 +666,8 @@ void deactivate_page(struct page *page)
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+			pagevec_lru_move_fn(pvec,
+					lru_deactivate_fn, NULL, true);
 		put_cpu_var(lru_deactivate_pvecs);
 	}
 }
@@ -681,7 +687,7 @@ void mark_page_lazyfree(struct page *page)
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL, true);
 		put_cpu_var(lru_lazyfree_pvecs);
 	}
 }
@@ -941,7 +947,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
  */
 void __pagevec_lru_add(struct pagevec *pvec)
 {
-	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
+	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL, false);
 }
 EXPORT_SYMBOL(__pagevec_lru_add);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bc2ec3fe4f48..efaa4f41044e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4343,7 +4343,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 		}
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
-		if (!PageLRU(page) || !PageUnevictable(page))
+		if (!TestClearPageLRU(page) || !PageUnevictable(page))
 			continue;
 
 		if (page_evictable(page)) {
@@ -4354,6 +4354,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
 			add_page_to_lru_list(page, lruvec, lru);
 			pgrescued++;
+			SetPageLRU(page);
 		}
 	}
 
-- 
1.8.3.1



