All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] mm/memcg: add allocstall to memory.stat
@ 2019-04-11 11:59 Yafang Shao
  2019-04-11 12:26 ` Michal Hocko
  0 siblings, 1 reply; 13+ messages in thread
From: Yafang Shao @ 2019-04-11 11:59 UTC (permalink / raw)
  To: hannes, chris, mhocko; +Cc: akpm, cgroups, linux-mm, shaoyafang, Yafang Shao

The current item 'pgscan' is for pages in the memcg,
which indicates how many pages owned by this memcg are scanned.
However, these pages may not be scanned by the tasks in this memcg,
even for PGSCAN_DIRECT.

Sometimes we need an item to indicate whether the tasks in this memcg
are under memory pressure or not.
So this new item allocstall is added into memory.stat.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 Documentation/admin-guide/cgroup-v2.rst |  3 +++
 include/linux/memcontrol.h              | 18 ++++++++++++++++++
 mm/memcontrol.c                         | 18 +-----------------
 mm/vmscan.c                             |  2 ++
 4 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 19c4e78..a06f17a 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1221,6 +1221,9 @@ PAGE_SIZE multiple when read back.
 		Part of "slab" that cannot be reclaimed on memory
 		pressure.
 
+          allocstall
+                The number of times tasks in this memcg entered direct reclaim
+
 	  pgfault
 		Total number of page faults incurred
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1565831..7fe9c57 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -45,6 +45,7 @@ enum memcg_stat_item {
 	MEMCG_SOCK,
 	/* XXX: why are these zone and not node counters? */
 	MEMCG_KERNEL_STACK_KB,
+	MEMCG_ALLOCSTALL,
 	MEMCG_NR_STAT,
 };
 
@@ -412,6 +413,23 @@ static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 
 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
 
+/**
+ * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
+ */
+static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+	if (unlikely(current->active_memcg)) {
+		struct mem_cgroup *memcg = root_mem_cgroup;
+
+		rcu_read_lock();
+		if (css_tryget_online(&current->active_memcg->css))
+			memcg = current->active_memcg;
+		rcu_read_unlock();
+		return memcg;
+	}
+	return get_mem_cgroup_from_mm(current->mm);
+}
+
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 10af4dd..780659f9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -853,23 +853,6 @@ struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
 EXPORT_SYMBOL(get_mem_cgroup_from_page);
 
 /**
- * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
- */
-static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
-{
-	if (unlikely(current->active_memcg)) {
-		struct mem_cgroup *memcg = root_mem_cgroup;
-
-		rcu_read_lock();
-		if (css_tryget_online(&current->active_memcg->css))
-			memcg = current->active_memcg;
-		rcu_read_unlock();
-		return memcg;
-	}
-	return get_mem_cgroup_from_mm(current->mm);
-}
-
-/**
  * mem_cgroup_iter - iterate over memory cgroup hierarchy
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
@@ -5624,6 +5607,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
 	/* Accumulated memory events */
 
+	seq_printf(m, "allocstall %lu\n", acc.vmevents[MEMCG_ALLOCSTALL]);
 	seq_printf(m, "pgfault %lu\n", acc.vmevents[PGFAULT]);
 	seq_printf(m, "pgmajfault %lu\n", acc.vmevents[PGMAJFAULT]);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 347c9b3..3ff8b1b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3024,6 +3024,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	if (global_reclaim(sc))
 		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
 
+	count_memcg_events(get_mem_cgroup_from_current(), MEMCG_ALLOCSTALL, 1);
+
 	do {
 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
 				sc->priority);
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2019-04-12  9:48 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-11 11:59 [PATCH] mm/memcg: add allocstall to memory.stat Yafang Shao
2019-04-11 12:26 ` Michal Hocko
2019-04-11 12:41   ` Yafang Shao
2019-04-11 13:39     ` Michal Hocko
2019-04-11 13:54       ` Yafang Shao
2019-04-11 15:10         ` Michal Hocko
2019-04-12  1:32           ` Yafang Shao
2019-04-12  6:34             ` Michal Hocko
2019-04-12  8:10               ` Yafang Shao
2019-04-12  9:09                 ` Michal Hocko
2019-04-12  9:29                   ` Yafang Shao
2019-04-12  9:36                     ` Michal Hocko
2019-04-12  9:48                       ` Yafang Shao

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.