From: Pengfei Li <fly@kernel.page>
To: akpm@linux-foundation.org
Cc: mgorman@techsingularity.net, mhocko@kernel.org, vbabka@suse.cz,
	cl@linux.com, iamjoonsoo.kim@lge.com, guro@fb.com,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Pengfei Li <fly@kernel.page>
Subject: [RFC v1 09/19] mm, vmscan: use for_each_node in shrink_zones()
Date: Thu, 21 Nov 2019 23:18:01 +0800
Message-ID: <20191121151811.49742-10-fly@kernel.page>
In-Reply-To: <20191121151811.49742-1-fly@kernel.page>

In shrink_zones(), we want to traverse nodes instead of zones, so use
for_each_node instead of for_each_zone. With per-node traversal, the
last_pgdat check that prevented shrinking the same node more than once
is no longer needed, and compaction readiness is now evaluated for all
usable zones of a node at once via the new node_compaction_ready()
helper.
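
For intuition, a minimal userspace sketch (not kernel code; the
node/zone layout below is invented purely for illustration) of why the
zone walk needed the last_pgdat dedup while the node walk does not:

#include <stdio.h>

/* Toy model: each node contains several zones. */
struct zone { int nid; const char *name; };

static const struct zone zones[] = {
	{ 0, "DMA32"  },	/* node 0 */
	{ 0, "Normal" },	/* node 0 */
	{ 1, "Normal" },	/* node 1 */
};

int main(void)
{
	int last_nid = -1;
	unsigned int i;
	int nid;

	/* Old shape: the zone walk visits node 0 twice, so it must dedup. */
	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		if (zones[i].nid == last_nid)
			continue;	/* this node was already shrunk */
		last_nid = zones[i].nid;
		printf("zone walk: shrink node %d\n", zones[i].nid);
	}

	/* New shape: the node walk visits each node exactly once. */
	for (nid = 0; nid < 2; nid++)
		printf("node walk: shrink node %d\n", nid);

	return 0;
}

Both walks shrink each node once, but only because of the explicit
dedup in the first; the node walk gets this for free.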

Signed-off-by: Pengfei Li <fly@kernel.page>
---
 mm/vmscan.c | 53 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 30 insertions(+), 23 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b5256ef682c2..2b0e51525c3a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2910,6 +2910,25 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
 }
 
+static bool
+node_compaction_ready(struct nlist_traverser *t, struct scan_control *sc)
+{
+	bool node_ready = true;
+	struct zone *zone;
+
+	do {
+		zone = traverser_zone(t);
+
+		if (compaction_ready(zone, sc))
+			sc->compaction_ready = true;
+		else
+			node_ready = false;
+
+	} while (t->usable_zones);
+
+	return node_ready;
+}
+
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -2920,12 +2939,12 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  */
 static void shrink_zones(struct nodelist *nodelist, struct scan_control *sc)
 {
-	struct nlist_traverser t;
-	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
 	gfp_t orig_mask;
-	pg_data_t *last_pgdat = NULL;
+	pg_data_t *pgdat;
+	struct nlist_traverser t;
+	int node;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2938,14 +2957,17 @@ static void shrink_zones(struct nodelist *nodelist, struct scan_control *sc)
 		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
 	}
 
-	for_each_zone_nlist_nodemask(zone, &t, nodelist,
+	for_each_node_nlist_nodemask(node, &t, nodelist,
 					sc->reclaim_idx, sc->nodemask) {
+
+		pgdat = NODE_DATA(node);
+
 		/*
 		 * Take care memory controller reclaiming has small influence
 		 * to global LRU.
 		 */
 		if (!cgroup_reclaim(sc)) {
-			if (!cpuset_zone_allowed(zone,
+			if (!cpuset_node_allowed(node,
 						 GFP_KERNEL | __GFP_HARDWALL))
 				continue;
 
@@ -2960,18 +2982,7 @@ static void shrink_zones(struct nodelist *nodelist, struct scan_control *sc)
 			 */
 			if (IS_ENABLED(CONFIG_COMPACTION) &&
 			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
-			    compaction_ready(zone, sc)) {
-				sc->compaction_ready = true;
-				continue;
-			}
-
-			/*
-			 * Shrink each node in the zonelist once. If the
-			 * zonelist is ordered by zone (not the default) then a
-			 * node may be shrunk multiple times but in that case
-			 * the user prefers lower zones being preserved.
-			 */
-			if (zone->zone_pgdat == last_pgdat)
+			    node_compaction_ready(&t, sc))
 				continue;
 
 			/*
@@ -2981,7 +2992,7 @@ static void shrink_zones(struct nodelist *nodelist, struct scan_control *sc)
 			 * and balancing, not for a memcg's limit.
 			 */
 			nr_soft_scanned = 0;
-			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat,
 						sc->order, sc->gfp_mask,
 						&nr_soft_scanned);
 			sc->nr_reclaimed += nr_soft_reclaimed;
@@ -2989,11 +3000,7 @@ static void shrink_zones(struct nodelist *nodelist, struct scan_control *sc)
 			/* need some check for avoid more shrink_zone() */
 		}
 
-		/* See comment about same check for global reclaim above */
-		if (zone->zone_pgdat == last_pgdat)
-			continue;
-		last_pgdat = zone->zone_pgdat;
-		shrink_node(zone->zone_pgdat, sc);
+		shrink_node(pgdat, sc);
 	}
 
 	/*
-- 
2.23.0


Thread overview: 39+ messages
2019-11-21 15:17 [RFC v1 00/19] Modify zonelist to nodelist v1 Pengfei Li
2019-11-21 15:17 ` [RFC v1 01/19] mm, mmzone: modify zonelist to nodelist Pengfei Li
2019-11-21 15:17 ` [RFC v1 02/19] mm, hugetlb: use for_each_node in dequeue_huge_page_nodemask() Pengfei Li
2019-11-21 15:17 ` [RFC v1 03/19] mm, oom_kill: use for_each_node in constrained_alloc() Pengfei Li
2019-11-21 15:17 ` [RFC v1 04/19] mm, slub: use for_each_node in get_any_partial() Pengfei Li
2019-11-21 15:17 ` [RFC v1 05/19] mm, slab: use for_each_node in fallback_alloc() Pengfei Li
2019-11-21 15:17 ` [RFC v1 06/19] mm, vmscan: use for_each_node in do_try_to_free_pages() Pengfei Li
2019-11-21 15:17 ` [RFC v1 07/19] mm, vmscan: use first_node in throttle_direct_reclaim() Pengfei Li
2019-11-21 15:18 ` [RFC v1 08/19] mm, vmscan: pass pgdat to wakeup_kswapd() Pengfei Li
2019-11-21 15:18 ` [RFC v1 09/19] mm, vmscan: use for_each_node in shrink_zones() Pengfei Li [this message]
2019-11-21 15:18 ` [RFC v1 10/19] mm, page_alloc: use for_each_node in wake_all_kswapds() Pengfei Li
2019-11-21 15:18 ` [RFC v1 11/19] mm, mempolicy: use first_node in mempolicy_slab_node() Pengfei Li
2019-11-21 15:18 ` [RFC v1 12/19] mm, mempolicy: use first_node in mpol_misplaced() Pengfei Li
2019-11-21 15:18 ` [RFC v1 13/19] mm, page_alloc: use first_node in local_memory_node() Pengfei Li
2019-11-21 15:18 ` [RFC v1 14/19] mm, compaction: rename compaction_zonelist_suitable Pengfei Li
2019-11-21 15:18 ` [RFC v1 15/19] mm, mm_init: rename mminit_verify_zonelist Pengfei Li
2019-11-21 15:18 ` [RFC v1 16/19] mm, page_alloc: cleanup build_zonelists Pengfei Li
2019-11-21 15:18 ` [RFC v1 17/19] mm, memory_hotplug: cleanup online_pages() Pengfei Li
2019-11-21 15:18 ` [RFC v1 18/19] kernel, sysctl: cleanup numa_zonelist_order Pengfei Li
2019-11-21 15:18 ` [RFC v1 19/19] mm, mmzone: cleanup zonelist in comments Pengfei Li
2019-11-21 18:04 ` [RFC v1 00/19] Modify zonelist to nodelist v1 Michal Hocko
2019-11-22 15:05   ` Pengfei Li
2019-11-25  8:40     ` Michal Hocko
2019-11-25 14:46       ` Pengfei Li
2019-11-25 15:46         ` Michal Hocko
2019-11-22 10:03 ` David Hildenbrand
2019-11-22 15:49   ` Pengfei Li
2019-11-22 15:53     ` Christopher Lameter
2019-11-22 16:06       ` David Hildenbrand
2019-11-22 17:36       ` Pengfei Li
2019-11-22 18:24         ` Christopher Lameter
     [not found] ` <2019112215245905276118@gmail.com>
2019-11-22 10:14   ` David Hildenbrand
2019-11-22 15:28   ` Pengfei Li
2019-11-22 15:53     ` Qian Cai
2019-11-25  8:39       ` Michal Hocko
2019-11-26 15:30         ` Qian Cai
2019-11-26 15:41           ` Michal Hocko
2019-11-26 19:04             ` Qian Cai
2019-11-27  8:50               ` Michal Hocko
