Subject: [PATCH] mm/vmscan: respect cpuset policy during page demotion
From: Feng Tang @ 2022-10-26  7:43 UTC
  To: Andrew Morton, Johannes Weiner, Michal Hocko, Tejun Heo,
	Zefan Li, Waiman Long, ying.huang, aneesh.kumar, linux-mm,
	cgroups
  Cc: linux-kernel, dave.hansen, tim.c.chen, fengwei.yin, Feng Tang

In the page reclaim path, memory can be demoted from a faster memory
tier to a slower one. Currently there is no check against the cpuset's
memory policy, so even when a demotion target node is not allowed by
the cpuset, the demotion still happens, which breaks cpuset semantics.

Add a cpuset policy check to the demotion path and skip demotion when
the demotion targets are not allowed by the cpuset.

Signed-off-by: Feng Tang <feng.tang@intel.com>
---
Hi reviewers,

To keep things easily bisectable, I combined the cpuset change and the
mm change in one patch; if you prefer them separate, I can split it
into 2 patches.

Thanks,
Feng
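
To make the intended behavior concrete, here is a minimal, standalone
userspace sketch of the check this patch adds in shrink_folio_list()
below: demotion is skipped when the node's demotion targets do not
intersect the cpuset's allowed memory nodes. Plain unsigned long
bitmasks stand in for the kernel's nodemask_t, and the mask values are
made-up examples, not taken from any real configuration.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the !nodes_intersects(nmask, nmask1) test in the vmscan hunk. */
static bool should_skip_demotion(unsigned long demotion_targets,
				 unsigned long cpuset_allowed)
{
	return (demotion_targets & cpuset_allowed) == 0;
}

int main(void)
{
	unsigned long targets = 1UL << 2 | 1UL << 3;	/* slow-tier nodes 2,3 */
	unsigned long allowed = 1UL << 0 | 1UL << 1;	/* cpuset.mems = 0-1 */

	printf("skip demotion: %s\n",
	       should_skip_demotion(targets, allowed) ? "yes" : "no");
	return 0;
}

With cpuset.mems restricted to nodes 0-1 and the demotion targets on
nodes 2-3, the masks do not intersect, so the folio stays on the
regular reclaim path instead of being demoted.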

 include/linux/cpuset.h |  7 +++++++
 kernel/cgroup/cpuset.c | 29 +++++++++++++++++++++++++++++
 mm/vmscan.c            | 35 ++++++++++++++++++++++++++++++++---
 3 files changed, 68 insertions(+), 3 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d58e0476ee8e..6fcce2bd2631 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -178,6 +178,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
+extern void cpuset_get_allowed_mem_nodes(struct cgroup *cgroup,
+						nodemask_t *nmask);
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -299,6 +301,11 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
+static inline void cpuset_get_allowed_mem_nodes(struct cgroup *cgroup,
+						nodemask_t *nmask)
+{
+	*nmask = NODE_MASK_ALL;
+}
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3ea2e836e93e..cbb118c0502f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3750,6 +3750,35 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	return mask;
 }
 
+/*
+ * Retrieve the allowed memory nodemask for a cgroup.
+ *
+ * Set *nmask to the cpuset's effective allowed nodemask for cgroup v2,
+ * and to NODE_MASK_ALL (meaning no constraint) for cgroup v1, where
+ * there is no guaranteed association from a cgroup to a cpuset.
+ */
+void cpuset_get_allowed_mem_nodes(struct cgroup *cgroup, nodemask_t *nmask)
+{
+	struct cgroup_subsys_state *css;
+	struct cpuset *cs;
+
+	if (!is_in_v2_mode()) {
+		*nmask = NODE_MASK_ALL;
+		return;
+	}
+
+	rcu_read_lock();
+	css = cgroup_e_css(cgroup, &cpuset_cgrp_subsys);
+	if (css) {
+		cs = css_cs(css);
+		*nmask = cs->effective_mems;
+	} else {
+		/* No cpuset css found, fall back to no constraint. */
+		*nmask = NODE_MASK_ALL;
+	}
+	rcu_read_unlock();
+}
+
 /**
  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
  * @nodemask: the nodemask to be checked
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 18f6497994ec..c205d98283bc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1537,9 +1537,21 @@ static struct page *alloc_demote_page(struct page *page, unsigned long private)
 {
 	struct page *target_page;
 	nodemask_t *allowed_mask;
-	struct migration_target_control *mtc;
+	struct migration_target_control *mtc = (void *)private;
 
-	mtc = (struct migration_target_control *)private;
+#if IS_ENABLED(CONFIG_MEMCG) && IS_ENABLED(CONFIG_CPUSETS)
+	struct mem_cgroup *memcg = page_memcg(page);
+	nodemask_t cpuset_nmask = NODE_MASK_ALL;
+
+	if (memcg)
+		cpuset_get_allowed_mem_nodes(memcg->css.cgroup, &cpuset_nmask);
+
+	if (!node_isset(mtc->nid, cpuset_nmask)) {
+		if (mtc->nmask)
+			nodes_and(*mtc->nmask, *mtc->nmask, cpuset_nmask);
+		return alloc_migration_target(page, (unsigned long)mtc);
+	}
+#endif
 
 	allowed_mask = mtc->nmask;
 	/*
@@ -1649,6 +1661,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		enum folio_references references = FOLIOREF_RECLAIM;
 		bool dirty, writeback;
 		unsigned int nr_pages;
+		bool skip_this_demotion = false;
 
 		cond_resched();
 
@@ -1658,6 +1671,22 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		if (!folio_trylock(folio))
 			goto keep;
 
+#if IS_ENABLED(CONFIG_MEMCG) && IS_ENABLED(CONFIG_CPUSETS)
+		if (do_demote_pass) {
+			struct mem_cgroup *memcg;
+			nodemask_t nmask, nmask1 = NODE_MASK_ALL;
+
+			node_get_allowed_targets(pgdat, &nmask);
+			memcg = folio_memcg(folio);
+			if (memcg)
+				cpuset_get_allowed_mem_nodes(memcg->css.cgroup,
+								&nmask1);
+
+			if (!nodes_intersects(nmask, nmask1))
+				skip_this_demotion = true;
+		}
+#endif
+
 		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
 
 		nr_pages = folio_nr_pages(folio);
@@ -1799,7 +1828,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		 * Before reclaiming the folio, try to relocate
 		 * its contents to another node.
 		 */
-		if (do_demote_pass &&
+		if (do_demote_pass && !skip_this_demotion &&
 		    (thp_migration_supported() || !folio_test_large(folio))) {
 			list_add(&folio->lru, &demote_folios);
 			folio_unlock(folio);
-- 
2.27.0
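
For readers without the surrounding vmscan code at hand, the existing
alloc_demote_page() fallback that this patch hooks into can be pictured
with the same userspace conventions as the earlier sketch: first try
the preferred target node alone, then retry with the whole allowed
mask. try_alloc_on() is a made-up stand-in for alloc_migration_target(),
not a real kernel function.

#include <stdbool.h>
#include <stdio.h>

/* Toy allocator: pretend only nodes in 'available' have free memory. */
static bool try_alloc_on(unsigned long mask, unsigned long available)
{
	return (mask & available) != 0;
}

int main(void)
{
	unsigned long preferred = 1UL << 2;		/* mtc->nid */
	unsigned long allowed   = 1UL << 2 | 1UL << 3;	/* mtc->nmask */
	unsigned long available = 1UL << 3;		/* node 2 is full */

	/* Step 1: target node only (kernel: nmask = NULL, __GFP_THISNODE). */
	if (try_alloc_on(preferred, available))
		printf("allocated on the preferred node\n");
	/* Step 2: fall back to the whole allowed mask. */
	else if (try_alloc_on(allowed, available))
		printf("allocated on a fallback node\n");
	else
		printf("demotion allocation failed\n");
	return 0;
}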


Thread overview: 98+ messages
2022-10-26  7:43 [PATCH] mm/vmscan: respect cpuset policy during page demotion Feng Tang
2022-10-26  7:49 ` Aneesh Kumar K V
2022-10-26  8:00   ` Feng Tang
2022-10-26  9:19     ` Michal Hocko
2022-10-26 10:42       ` Aneesh Kumar K V
2022-10-26 11:02         ` Michal Hocko
2022-10-26 12:08           ` Aneesh Kumar K V
2022-10-26 12:21             ` Michal Hocko
2022-10-26 12:35               ` Aneesh Kumar K V
2022-10-27  9:02                 ` Michal Hocko
2022-10-27 10:16                   ` Aneesh Kumar K V
2022-10-27 13:05                     ` Michal Hocko
2022-10-26 12:20       ` Feng Tang
2022-10-26 15:59         ` Michal Hocko
2022-10-26 17:57           ` Yang Shi
2022-10-27  7:11             ` Feng Tang
2022-10-27  7:45               ` Huang, Ying
2022-10-27  7:51                 ` Feng Tang
2022-10-27 17:55               ` Yang Shi
2022-10-28  3:37                 ` Feng Tang
2022-10-28  5:54                   ` Huang, Ying
2022-10-28 17:23                     ` Yang Shi
2022-10-31  1:56                       ` Huang, Ying
2022-10-31  2:19                       ` Feng Tang
2022-10-28  5:09                 ` Aneesh Kumar K V
2022-10-28 17:16                   ` Yang Shi
2022-10-31  1:53                     ` Huang, Ying
2022-10-27  6:47           ` Huang, Ying
2022-10-27  7:10             ` Michal Hocko
2022-10-27  7:39               ` Huang, Ying
2022-10-27  8:01                 ` Michal Hocko
2022-10-27  9:31                   ` Huang, Ying
2022-10-27 12:29                     ` Michal Hocko
2022-10-27 23:22                       ` Huang, Ying
2022-10-31  8:40                         ` Michal Hocko
2022-10-31  8:51                           ` Huang, Ying
2022-10-31  9:18                             ` Michal Hocko
2022-10-31 14:09                           ` Feng Tang
2022-10-31 14:32                             ` Michal Hocko
2022-11-07  8:05                               ` Feng Tang
2022-11-07  8:17                                 ` Michal Hocko
2022-11-01  3:17                     ` Huang, Ying
2022-10-26  8:26 ` Yin, Fengwei
2022-10-26  8:37   ` Feng Tang
2022-10-26 14:36 ` Waiman Long
2022-10-27  5:57   ` Feng Tang
2022-10-27  5:13 ` Huang, Ying
2022-10-27  5:49   ` Feng Tang
2022-10-27  6:05     ` Huang, Ying
