From: Yu Zhao <yuzhao@google.com>
To: Stephen Rothwell <sfr@rothwell.id.au>, linux-mm@kvack.org
Cc: "Andi Kleen" <ak@linux.intel.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Aneesh Kumar" <aneesh.kumar@linux.ibm.com>,
	"Barry Song" <21cnbao@gmail.com>,
	"Catalin Marinas" <catalin.marinas@arm.com>,
	"Dave Hansen" <dave.hansen@linux.intel.com>,
	"Hillf Danton" <hdanton@sina.com>, "Jens Axboe" <axboe@kernel.dk>,
	"Jesse Barnes" <jsbarnes@google.com>,
	"Johannes Weiner" <hannes@cmpxchg.org>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Linus Torvalds" <torvalds@linux-foundation.org>,
	"Matthew Wilcox" <willy@infradead.org>,
	"Mel Gorman" <mgorman@suse.de>,
	"Michael Larabel" <Michael@michaellarabel.com>,
	"Michal Hocko" <mhocko@kernel.org>,
	"Mike Rapoport" <rppt@kernel.org>,
	"Rik van Riel" <riel@surriel.com>,
	"Vlastimil Babka" <vbabka@suse.cz>,
	"Will Deacon" <will@kernel.org>,
	"Ying Huang" <ying.huang@intel.com>,
	linux-arm-kernel@lists.infradead.org, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, page-reclaim@google.com,
	x86@kernel.org, "Yu Zhao" <yuzhao@google.com>,
	"Barry Song" <baohua@kernel.org>,
	"Brian Geffon" <bgeffon@google.com>,
	"Jan Alexander Steffens" <heftig@archlinux.org>,
	"Oleksandr Natalenko" <oleksandr@natalenko.name>,
	"Steven Barrett" <steven@liquorix.net>,
	"Suleiman Souhlal" <suleiman@google.com>,
	"Daniel Byrne" <djbyrne@mtu.edu>,
	"Donald Carr" <d@chaos-reins.com>,
	"Holger Hoffstätte" <holger@applied-asynchrony.com>,
	"Konstantin Kharlamov" <Hi-Angel@yandex.ru>,
	"Shuang Zhai" <szhai2@cs.rochester.edu>,
	"Sofia Trinh" <sofia.trinh@edi.works>,
	"Vaibhav Jain" <vaibhav@linux.ibm.com>
Subject: [PATCH v10 03/14] mm/vmscan.c: refactor shrink_node()
Date: Wed,  6 Apr 2022 21:15:15 -0600	[thread overview]
Message-ID: <20220407031525.2368067-4-yuzhao@google.com> (raw)
In-Reply-To: <20220407031525.2368067-1-yuzhao@google.com>

Refactor shrink_node() to improve readability for the upcoming changes
to mm/vmscan.c: move the preparation of the scan balance, i.e., the
heuristics that set sc->may_deactivate, sc->cache_trim_mode and
sc->file_is_tiny, out of shrink_node() and into a new helper,
prepare_scan_count(). No functional change intended.
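
After this change, the top of shrink_node() reduces to the following,
abridged from the diff below (the rest of the function is untouched):

  again:
          memset(&sc->nr, 0, sizeof(sc->nr));
          nr_reclaimed = sc->nr_reclaimed;
          nr_scanned = sc->nr_scanned;
          prepare_scan_count(pgdat, sc);
          shrink_node_memcgs(pgdat, sc);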

Signed-off-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
---
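Note for reviewers (kept below the cut so it stays out of git history):
the file >> sc->priority and anon >> sc->priority tests move into
prepare_scan_count() unchanged. Shifting a page count right by the
reclaim priority acts as a proportional threshold: the test is true
only while the list holds at least 2^priority pages, and priority
starts at DEF_PRIORITY (12) and drops toward 0 as reclaim grows more
aggressive. A standalone sketch of that behavior, using a made-up page
count rather than kernel state:

  #include <stdio.h>

  int main(void)
  {
          /* hypothetical inactive-list size, in pages */
          unsigned long file = 3000;
          int priority;

          /* DEF_PRIORITY is 12 */
          for (priority = 12; priority >= 0; priority--)
                  printf("priority %2d: file >> priority = %4lu -> %s\n",
                         priority, file >> priority,
                         file >> priority ? "plenty" : "too few");
          return 0;
  }
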
 mm/vmscan.c | 198 +++++++++++++++++++++++++++-------------------------
 1 file changed, 104 insertions(+), 94 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1678802e03e7..2232cb55af41 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2644,6 +2644,109 @@ enum scan_balance {
 	SCAN_FILE,
 };
 
+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
+{
+	unsigned long file;
+	struct lruvec *target_lruvec;
+
+	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+	/*
+	 * Flush the memory cgroup stats, so that we read accurate per-memcg
+	 * lruvec stats for heuristics.
+	 */
+	mem_cgroup_flush_stats();
+
+	/*
+	 * Determine the scan balance between anon and file LRUs.
+	 */
+	spin_lock_irq(&target_lruvec->lru_lock);
+	sc->anon_cost = target_lruvec->anon_cost;
+	sc->file_cost = target_lruvec->file_cost;
+	spin_unlock_irq(&target_lruvec->lru_lock);
+
+	/*
+	 * Target desirable inactive:active list ratios for the anon
+	 * and file LRU lists.
+	 */
+	if (!sc->force_deactivate) {
+		unsigned long refaults;
+
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_ANON);
+		if (refaults != target_lruvec->refaults[0] ||
+			inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+			sc->may_deactivate |= DEACTIVATE_ANON;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_ANON;
+
+		/*
+		 * When refaults are being observed, it means a new
+		 * workingset is being established. Deactivate to get
+		 * rid of any stale active pages quickly.
+		 */
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_FILE);
+		if (refaults != target_lruvec->refaults[1] ||
+		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+			sc->may_deactivate |= DEACTIVATE_FILE;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_FILE;
+	} else
+		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+
+	/*
+	 * If we have plenty of inactive file pages that aren't
+	 * thrashing, try to reclaim those first before touching
+	 * anonymous pages.
+	 */
+	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+		sc->cache_trim_mode = 1;
+	else
+		sc->cache_trim_mode = 0;
+
+	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU.  And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages.  Try to detect this based on file LRU size.
+	 */
+	if (!cgroup_reclaim(sc)) {
+		unsigned long total_high_wmark = 0;
+		unsigned long free, anon;
+		int z;
+
+		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+			   node_page_state(pgdat, NR_INACTIVE_FILE);
+
+		for (z = 0; z < MAX_NR_ZONES; z++) {
+			struct zone *zone = &pgdat->node_zones[z];
+
+			if (!managed_zone(zone))
+				continue;
+
+			total_high_wmark += high_wmark_pages(zone);
+		}
+
+		/*
+		 * Consider anon: if that's low too, this isn't a
+		 * runaway file reclaim problem, but rather just
+		 * extreme pressure. Reclaim as per usual then.
+		 */
+		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+
+		sc->file_is_tiny =
+			file + free <= total_high_wmark &&
+			!(sc->may_deactivate & DEACTIVATE_ANON) &&
+			anon >> sc->priority;
+	}
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -3114,109 +3217,16 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	unsigned long nr_reclaimed, nr_scanned;
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
-	unsigned long file;
 
 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 
 again:
-	/*
-	 * Flush the memory cgroup stats, so that we read accurate per-memcg
-	 * lruvec stats for heuristics.
-	 */
-	mem_cgroup_flush_stats();
-
 	memset(&sc->nr, 0, sizeof(sc->nr));
 
 	nr_reclaimed = sc->nr_reclaimed;
 	nr_scanned = sc->nr_scanned;
 
-	/*
-	 * Determine the scan balance between anon and file LRUs.
-	 */
-	spin_lock_irq(&target_lruvec->lru_lock);
-	sc->anon_cost = target_lruvec->anon_cost;
-	sc->file_cost = target_lruvec->file_cost;
-	spin_unlock_irq(&target_lruvec->lru_lock);
-
-	/*
-	 * Target desirable inactive:active list ratios for the anon
-	 * and file LRU lists.
-	 */
-	if (!sc->force_deactivate) {
-		unsigned long refaults;
-
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_ANON);
-		if (refaults != target_lruvec->refaults[0] ||
-			inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-			sc->may_deactivate |= DEACTIVATE_ANON;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_ANON;
-
-		/*
-		 * When refaults are being observed, it means a new
-		 * workingset is being established. Deactivate to get
-		 * rid of any stale active pages quickly.
-		 */
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_FILE);
-		if (refaults != target_lruvec->refaults[1] ||
-		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-			sc->may_deactivate |= DEACTIVATE_FILE;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_FILE;
-	} else
-		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-
-	/*
-	 * If we have plenty of inactive file pages that aren't
-	 * thrashing, try to reclaim those first before touching
-	 * anonymous pages.
-	 */
-	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-		sc->cache_trim_mode = 1;
-	else
-		sc->cache_trim_mode = 0;
-
-	/*
-	 * Prevent the reclaimer from falling into the cache trap: as
-	 * cache pages start out inactive, every cache fault will tip
-	 * the scan balance towards the file LRU.  And as the file LRU
-	 * shrinks, so does the window for rotation from references.
-	 * This means we have a runaway feedback loop where a tiny
-	 * thrashing file LRU becomes infinitely more attractive than
-	 * anon pages.  Try to detect this based on file LRU size.
-	 */
-	if (!cgroup_reclaim(sc)) {
-		unsigned long total_high_wmark = 0;
-		unsigned long free, anon;
-		int z;
-
-		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-			   node_page_state(pgdat, NR_INACTIVE_FILE);
-
-		for (z = 0; z < MAX_NR_ZONES; z++) {
-			struct zone *zone = &pgdat->node_zones[z];
-			if (!managed_zone(zone))
-				continue;
-
-			total_high_wmark += high_wmark_pages(zone);
-		}
-
-		/*
-		 * Consider anon: if that's low too, this isn't a
-		 * runaway file reclaim problem, but rather just
-		 * extreme pressure. Reclaim as per usual then.
-		 */
-		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-
-		sc->file_is_tiny =
-			file + free <= total_high_wmark &&
-			!(sc->may_deactivate & DEACTIVATE_ANON) &&
-			anon >> sc->priority;
-	}
+	prepare_scan_count(pgdat, sc);
 
 	shrink_node_memcgs(pgdat, sc);
 
-- 
2.35.1.1094.g7c7d902a7c-goog

Thread overview: 198+ messages
2022-04-07  3:15 [PATCH v10 00/14] Multi-Gen LRU Framework Yu Zhao
2022-04-07  3:15 ` [PATCH v10 01/14] mm: x86, arm64: add arch_has_hw_pte_young() Yu Zhao
2022-04-07  3:15 ` [PATCH v10 02/14] mm: x86: add CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG Yu Zhao
2022-04-07  3:15 ` [PATCH v10 03/14] mm/vmscan.c: refactor shrink_node() Yu Zhao [this message]
2022-04-16  6:48   ` Miaohe Lin
2022-04-07  3:15 ` [PATCH v10 04/14] Revert "include/linux/mm_inline.h: fold __update_lru_size() into its sole caller" Yu Zhao
2022-04-16  6:50   ` Miaohe Lin
2022-04-07  3:15 ` [PATCH v10 05/14] mm: multi-gen LRU: groundwork Yu Zhao
2022-04-12  2:16   ` Andrew Morton
2022-04-12  7:06     ` Peter Zijlstra
2022-04-20  0:39       ` Yu Zhao
2022-04-20 20:07         ` Linus Torvalds
2022-04-26 22:39     ` Yu Zhao
2022-04-26 23:42       ` Andrew Morton
2022-04-27  1:18         ` Yu Zhao
2022-04-27  1:34           ` Andrew Morton
2022-04-07  3:15 ` [PATCH v10 06/14] mm: multi-gen LRU: minimal implementation Yu Zhao
2022-04-14  6:03   ` Barry Song
2022-04-14 20:36     ` Yu Zhao
2022-04-14 21:39       ` Andrew Morton
2022-04-14 22:14         ` Yu Zhao
2022-04-15 10:15         ` Barry Song
2022-04-15 20:17           ` Yu Zhao
2022-04-15 10:26       ` Barry Song
2022-04-15 20:18         ` Yu Zhao
2022-04-14 11:47   ` Chen Wandun
2022-04-14 20:53     ` Yu Zhao
2022-04-15  2:23       ` Chen Wandun
2022-04-15  5:25         ` Yu Zhao
2022-04-15  6:31           ` Chen Wandun
2022-04-15  6:44             ` Yu Zhao
2022-04-15  9:27               ` Chen Wandun
2022-04-18  9:58   ` Barry Song
2022-04-19  0:53     ` Yu Zhao
2022-04-19  4:25       ` Barry Song
2022-04-19  4:36         ` Barry Song
2022-04-19 22:25           ` Yu Zhao
2022-04-19 22:20         ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 07/14] mm: multi-gen LRU: exploit locality in rmap Yu Zhao
2022-04-27  4:32   ` Aneesh Kumar K.V
2022-04-27  4:38     ` Yu Zhao
2022-04-27  5:31       ` Aneesh Kumar K V
2022-04-27  6:00         ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 08/14] mm: multi-gen LRU: support page table walks Yu Zhao
2022-04-12  2:16   ` Andrew Morton
2022-04-12  7:10     ` Peter Zijlstra
2022-04-15  5:30       ` Yu Zhao
2022-04-15  1:14     ` Yu Zhao
2022-04-15  1:56       ` Andrew Morton
2022-04-15  6:25         ` Yu Zhao
2022-04-15 19:15           ` Andrew Morton
2022-04-15 20:11             ` Yu Zhao
2022-04-15 21:32               ` Andrew Morton
2022-04-15 21:36                 ` Linus Torvalds
2022-04-15 22:57                   ` Yu Zhao
2022-04-15 23:03                     ` Linus Torvalds
2022-04-15 23:24                       ` [page-reclaim] " Jesse Barnes
2022-04-15 23:31                         ` Matthew Wilcox
2022-04-15 23:37                           ` Jesse Barnes
2022-04-15 23:49                       ` Yu Zhao
2022-04-16 16:32                 ` Justin Forbes
2022-04-19 22:32                   ` Yu Zhao
2022-04-29 14:10   ` zhong jiang
2022-04-30  8:34     ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 09/14] mm: multi-gen LRU: optimize multiple memcgs Yu Zhao
2022-04-07  3:15 ` [PATCH v10 10/14] mm: multi-gen LRU: kill switch Yu Zhao
2022-04-12  2:16   ` Andrew Morton
2022-04-26 20:57     ` Yu Zhao
2022-04-26 22:22       ` Andrew Morton
2022-04-27  1:11         ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 11/14] mm: multi-gen LRU: thrashing prevention Yu Zhao
2022-04-07  3:15 ` [PATCH v10 12/14] mm: multi-gen LRU: debugfs interface Yu Zhao
2022-04-12  2:16   ` Andrew Morton
2022-04-16  0:03     ` Yu Zhao
2022-04-16  4:20       ` Andrew Morton
2022-04-26  6:59         ` Yu Zhao
2022-04-26 21:30           ` Andrew Morton
2022-04-26 22:15             ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 13/14] mm: multi-gen LRU: admin guide Yu Zhao
2022-04-07 12:41   ` Bagas Sanjaya
2022-04-07 12:51     ` Jonathan Corbet
2022-04-12  2:16   ` Andrew Morton
2022-04-16  2:22     ` Yu Zhao
2022-04-07  3:15 ` [PATCH v10 14/14] mm: multi-gen LRU: design doc Yu Zhao
2022-04-07 11:39   ` Huang Shijie
2022-04-07 12:41   ` Bagas Sanjaya
2022-04-07 12:52     ` Jonathan Corbet
2022-04-08  4:48       ` Bagas Sanjaya
2022-04-12  2:16   ` Andrew Morton
2022-04-26  7:42     ` Yu Zhao
2022-04-07  3:24 ` [PATCH v10 00/14] Multi-Gen LRU Framework Yu Zhao
2022-04-07  8:31   ` Stephen Rothwell
2022-04-07  9:08     ` Yu Zhao
2022-04-07  9:41     ` Yu Zhao
2022-04-07 12:13       ` Stephen Rothwell
2022-04-08  2:08         ` Yu Zhao
2022-04-12  2:15 ` Andrew Morton
2022-04-14  5:06 ` Andrew Morton
2022-04-20  0:50   ` Yu Zhao
