* [PATCH 1/4] mm: memcontrol: clean up memory.events counting function
@ 2017-04-04 22:01 Johannes Weiner
  2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
                   ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: Johannes Weiner @ 2017-04-04 22:01 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

We only ever count single events, so drop the @nr parameter. Rename the
function accordingly. Remove the low-information kerneldoc.
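
A representative call site from the hunks below changes from

  mem_cgroup_events(memcg, MEMCG_OOM, 1);

to

  mem_cgroup_event(memcg, MEMCG_OOM);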

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/memcontrol.h | 18 +++++-------------
 mm/memcontrol.c            |  8 ++++----
 mm/vmscan.c                |  2 +-
 3 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index cfa91a3ca0ca..bc0c16e284c0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -287,17 +287,10 @@ static inline bool mem_cgroup_disabled(void)
 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-/**
- * mem_cgroup_events - count memory events against a cgroup
- * @memcg: the memory cgroup
- * @idx: the event index
- * @nr: the number of events to account for
- */
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
-		       enum mem_cgroup_events_index idx,
-		       unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+				    enum mem_cgroup_events_index idx)
 {
-	this_cpu_add(memcg->stat->events[idx], nr);
+	this_cpu_inc(memcg->stat->events[idx]);
 	cgroup_file_notify(&memcg->events_file);
 }
 
@@ -614,9 +607,8 @@ static inline bool mem_cgroup_disabled(void)
 	return true;
 }
 
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
-				     enum mem_cgroup_events_index idx,
-				     unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+				    enum mem_cgroup_events_index idx)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 108d5b097db1..1ffa3ad201ea 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1825,7 +1825,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
 	do {
 		if (page_counter_read(&memcg->memory) <= memcg->high)
 			continue;
-		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+		mem_cgroup_event(memcg, MEMCG_HIGH);
 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
@@ -1916,7 +1916,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (!gfpflags_allow_blocking(gfp_mask))
 		goto nomem;
 
-	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
+	mem_cgroup_event(mem_over_limit, MEMCG_MAX);
 
 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
 						    gfp_mask, may_swap);
@@ -1959,7 +1959,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (fatal_signal_pending(current))
 		goto force;
 
-	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
+	mem_cgroup_event(mem_over_limit, MEMCG_OOM);
 
 	mem_cgroup_oom(mem_over_limit, gfp_mask,
 		       get_order(nr_pages * PAGE_SIZE));
@@ -5142,7 +5142,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
 			continue;
 		}
 
-		mem_cgroup_events(memcg, MEMCG_OOM, 1);
+		mem_cgroup_event(memcg, MEMCG_OOM);
 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
 			break;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b3f62cf37097..18731310ca36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2526,7 +2526,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 					sc->memcg_low_skipped = 1;
 					continue;
 				}
-				mem_cgroup_events(memcg, MEMCG_LOW, 1);
+				mem_cgroup_event(memcg, MEMCG_LOW);
 			}
 
 			reclaimed = sc->nr_reclaimed;
-- 
2.12.1


* [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
@ 2017-04-04 22:01 ` Johannes Weiner
  2017-04-06  8:49   ` Vladimir Davydov
  2017-04-07 12:47   ` Michal Hocko
  2017-04-04 22:01 ` [PATCH 3/4] mm: memcontrol: re-use node VM page state enum Johannes Weiner
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 14+ messages in thread
From: Johannes Weiner @ 2017-04-04 22:01 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

The current duplication is a high-maintenance mess, and it's painful
to add new items.

This increases the size of the event array, but we'll eventually want
most of the VM events tracked on a per-cgroup basis anyway.
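
With MEMCG_LOW starting at NR_VM_EVENT_ITEMS, global and cgroup-only
events now share one per-cpu array. As a sketch (the wrapper function
is made up for illustration; the individual calls mirror hunks below):

  static void count_example(struct mem_cgroup *memcg)
  {
  	this_cpu_inc(memcg->stat->events[PGFAULT]);	/* global VM event */
  	this_cpu_inc(memcg->stat->events[MEMCG_OOM]);	/* memcg-only event */
  }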

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/memcontrol.h | 45 ++++++++++++---------------------------
 mm/memcontrol.c            | 53 ++++++++++++++++++++++++----------------------
 2 files changed, 42 insertions(+), 56 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bc0c16e284c0..0bb5f055bd26 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -69,20 +69,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_events_index {
-	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
-	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
-	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
-	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
-	MEM_CGROUP_EVENTS_NSTATS,
-	/* default hierarchy events */
-	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
-	MEMCG_HIGH,
-	MEMCG_MAX,
-	MEMCG_OOM,
-	MEMCG_NR_EVENTS,
-};
-
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremated by the number of pages. This counter is used for
@@ -106,6 +92,15 @@ struct mem_cgroup_id {
 	atomic_t ref;
 };
 
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+	MEMCG_LOW = NR_VM_EVENT_ITEMS,
+	MEMCG_HIGH,
+	MEMCG_MAX,
+	MEMCG_OOM,
+	MEMCG_NR_EVENTS,
+};
+
 struct mem_cgroup_stat_cpu {
 	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
@@ -288,9 +283,9 @@ static inline bool mem_cgroup_disabled(void)
 }
 
 static inline void mem_cgroup_event(struct mem_cgroup *memcg,
-				    enum mem_cgroup_events_index idx)
+				    enum memcg_event_item event)
 {
-	this_cpu_inc(memcg->stat->events[idx]);
+	this_cpu_inc(memcg->stat->events[event]);
 	cgroup_file_notify(&memcg->events_file);
 }
 
@@ -575,20 +570,8 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
 
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	if (unlikely(!memcg))
-		goto out;
-
-	switch (idx) {
-	case PGFAULT:
-		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
-		break;
-	case PGMAJFAULT:
-		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
-		break;
-	default:
-		BUG();
-	}
-out:
+	if (likely(memcg))
+		this_cpu_inc(memcg->stat->events[idx]);
 	rcu_read_unlock();
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -608,7 +591,7 @@ static inline bool mem_cgroup_disabled(void)
 }
 
 static inline void mem_cgroup_event(struct mem_cgroup *memcg,
-				    enum mem_cgroup_events_index idx)
+				    enum memcg_event_item event)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1ffa3ad201ea..6b42887e5f14 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -111,13 +111,6 @@ static const char * const mem_cgroup_stat_names[] = {
 	"swap",
 };
 
-static const char * const mem_cgroup_events_names[] = {
-	"pgpgin",
-	"pgpgout",
-	"pgfault",
-	"pgmajfault",
-};
-
 static const char * const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
@@ -571,13 +564,13 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  */
 
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-					    enum mem_cgroup_events_index idx)
+					    enum memcg_event_item event)
 {
 	unsigned long val = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		val += per_cpu(memcg->stat->events[idx], cpu);
+		val += per_cpu(memcg->stat->events[event], cpu);
 	return val;
 }
 
@@ -608,9 +601,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+		__this_cpu_inc(memcg->stat->events[PGPGIN]);
 	else {
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
 		nr_pages = -nr_pages; /* for event */
 	}
 
@@ -3119,6 +3112,21 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 }
 #endif /* CONFIG_NUMA */
 
+/* Universal VM events cgroup1 shows, original sort order */
+unsigned int memcg1_events[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGFAULT,
+	PGMAJFAULT,
+};
+
+static const char *const memcg1_event_names[] = {
+	"pgpgin",
+	"pgpgout",
+	"pgfault",
+	"pgmajfault",
+};
+
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3128,8 +3136,6 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
 		     MEM_CGROUP_STAT_NSTATS);
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
-		     MEM_CGROUP_EVENTS_NSTATS);
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3139,9 +3145,9 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
-		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
-			   mem_cgroup_read_events(memcg, i));
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
+			   mem_cgroup_read_events(memcg, memcg1_events[i]));
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3169,13 +3175,12 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
 		unsigned long long val = 0;
 
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_events(mi, i);
-		seq_printf(m, "total_%s %llu\n",
-			   mem_cgroup_events_names[i], val);
+			val += mem_cgroup_read_events(mi, memcg1_events[i]);
+		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
 	}
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
@@ -5222,10 +5227,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
 	/* Accumulated memory events */
 
-	seq_printf(m, "pgfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGFAULT]);
-	seq_printf(m, "pgmajfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
+	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
 		   stat[MEMCG_WORKINGSET_REFAULT]);
@@ -5493,7 +5496,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
-	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
 	local_irq_restore(flags);
-- 
2.12.1


* [PATCH 3/4] mm: memcontrol: re-use node VM page state enum
  2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
  2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
@ 2017-04-04 22:01 ` Johannes Weiner
  2017-04-06  8:59   ` Vladimir Davydov
  2017-04-04 22:01 ` [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg Johannes Weiner
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Johannes Weiner @ 2017-04-04 22:01 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

The current duplication is a high-maintenance mess, and it's painful
to add new items or query memcg state from the rest of the VM.

This increases the size of the stat array marginally, but we should
aim to track all these stats on a per-cgroup level anyway.
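
The cgroup1 files keep their original output order through a pair of
parallel index/name arrays; as a sketch of the pattern (this mirrors
the memcg_stat_show() loop in the diff below):

  for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
  	if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
  		continue;
  	seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
  		   mem_cgroup_read_stat(memcg, memcg1_stats[i]) * PAGE_SIZE);
  }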

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/memcontrol.h | 100 +++++++++++++++------------------
 mm/memcontrol.c            | 135 +++++++++++++++++++++++----------------------
 mm/page-writeback.c        |  10 ++--
 mm/rmap.c                  |   4 +-
 mm/vmscan.c                |   5 +-
 mm/workingset.c            |   7 +--
 6 files changed, 123 insertions(+), 138 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0bb5f055bd26..0fa1f5de6841 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,40 +35,45 @@ struct page;
 struct mm_struct;
 struct kmem_cache;
 
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
-	/*
-	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
-	 */
-	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
-	MEM_CGROUP_STAT_SHMEM,		/* # of pages charged as shmem */
-	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
-	MEM_CGROUP_STAT_DIRTY,          /* # of dirty pages in page cache */
-	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
-	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
-	MEM_CGROUP_STAT_NSTATS,
-	/* default hierarchy stats */
-	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	MEMCG_SWAP,
+	MEMCG_SOCK,
+	/* XXX: why are these zone and not node counters? */
+	MEMCG_KERNEL_STACK_KB,
 	MEMCG_SLAB_RECLAIMABLE,
 	MEMCG_SLAB_UNRECLAIMABLE,
-	MEMCG_SOCK,
-	MEMCG_WORKINGSET_REFAULT,
-	MEMCG_WORKINGSET_ACTIVATE,
-	MEMCG_WORKINGSET_NODERECLAIM,
 	MEMCG_NR_STAT,
 };
 
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+	MEMCG_LOW = NR_VM_EVENT_ITEMS,
+	MEMCG_HIGH,
+	MEMCG_MAX,
+	MEMCG_OOM,
+	MEMCG_NR_EVENTS,
+};
+
 struct mem_cgroup_reclaim_cookie {
 	pg_data_t *pgdat;
 	int priority;
 	unsigned int generation;
 };
 
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT	16
+#define MEM_CGROUP_ID_MAX	USHRT_MAX
+
+struct mem_cgroup_id {
+	int id;
+	atomic_t ref;
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremated by the number of pages. This counter is used for
@@ -82,25 +87,6 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT	16
-#define MEM_CGROUP_ID_MAX	USHRT_MAX
-
-struct mem_cgroup_id {
-	int id;
-	atomic_t ref;
-};
-
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
-	MEMCG_LOW = NR_VM_EVENT_ITEMS,
-	MEMCG_HIGH,
-	MEMCG_MAX,
-	MEMCG_OOM,
-	MEMCG_NR_EVENTS,
-};
-
 struct mem_cgroup_stat_cpu {
 	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
@@ -487,7 +473,7 @@ void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
 static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-						 enum mem_cgroup_stat_index idx)
+						 enum memcg_stat_item idx)
 {
 	long val = 0;
 	int cpu;
@@ -502,20 +488,20 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx, int val)
+					  enum memcg_stat_item idx, int val)
 {
 	if (!mem_cgroup_disabled())
 		this_cpu_add(memcg->stat->count[idx], val);
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, 1);
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, -1);
 }
@@ -538,20 +524,20 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
  * Kernel pages are an exception to this, since they'll never move.
  */
 static inline void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx, int val)
+				 enum memcg_stat_item idx, int val)
 {
 	if (page->mem_cgroup)
 		mem_cgroup_update_stat(page->mem_cgroup, idx, val);
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
@@ -760,33 +746,33 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx, int val)
+					  enum memcg_stat_item idx, int val)
 {
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				   enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_update_page_stat(struct page *page,
-					       enum mem_cgroup_stat_index idx,
+					       enum memcg_stat_item idx,
 					       int nr)
 {
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 }
 
@@ -906,7 +892,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
  * @val: number of pages (positive or negative)
  */
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 	if (memcg_kmem_enabled() && page->mem_cgroup)
 		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -935,7 +921,7 @@ static inline void memcg_put_cache_ids(void)
 }
 
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6b42887e5f14..6fe4c7fafbfc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -100,18 +100,7 @@ static bool do_memsw_account(void)
 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 }
 
-static const char * const mem_cgroup_stat_names[] = {
-	"cache",
-	"rss",
-	"rss_huge",
-	"shmem",
-	"mapped_file",
-	"dirty",
-	"writeback",
-	"swap",
-};
-
-static const char * const mem_cgroup_lru_names[] = {
+static const char *const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
 	"inactive_file",
@@ -583,20 +572,16 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 * counted as CACHE even if it's on ANON LRU.
 	 */
 	if (PageAnon(page))
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
-				nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
 	else {
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
-				nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
 		if (PageSwapBacked(page))
-			__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SHMEM],
-				       nr_pages);
+			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
 	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
-				nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
 	}
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -1125,6 +1110,28 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
 	return false;
 }
 
+unsigned int memcg1_stats[] = {
+	MEMCG_CACHE,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	NR_SHMEM,
+	NR_FILE_MAPPED,
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+	"cache",
+	"rss",
+	"rss_huge",
+	"shmem",
+	"mapped_file",
+	"dirty",
+	"writeback",
+	"swap",
+};
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -1169,11 +1176,11 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
-		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
-				K(mem_cgroup_read_stat(iter, i)));
+			pr_cont(" %s:%luKB", memcg1_stat_names[i],
+				K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -2362,7 +2369,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++)
 		head[i].mem_cgroup = head->mem_cgroup;
 
-	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+	__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
 		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -2372,7 +2379,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 					 bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
+	this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
 }
 
 /**
@@ -2731,13 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		struct mem_cgroup *iter;
 
 		for_each_mem_cgroup_tree(iter, memcg) {
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_CACHE);
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_RSS);
+			val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
+			val += mem_cgroup_read_stat(iter, MEMCG_RSS);
 			if (swap)
-				val += mem_cgroup_read_stat(iter,
-						MEM_CGROUP_STAT_SWAP);
+				val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
 		}
 	} else {
 		if (!swap)
@@ -3134,15 +3138,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	struct mem_cgroup *mi;
 	unsigned int i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
-		     MEM_CGROUP_STAT_NSTATS);
+	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
-		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
-			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+			   PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -3165,14 +3169,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
 			   (u64)memsw * PAGE_SIZE);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		unsigned long long val = 0;
 
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
+			val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+			PAGE_SIZE;
+		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
@@ -3645,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
 	struct mem_cgroup *parent;
 
-	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+	*pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
 
 	/* this should eventually include NR_UNSTABLE_NFS */
-	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+	*pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
 						     (1 << LRU_ACTIVE_FILE));
 	*pheadroom = PAGE_COUNTER_MAX;
@@ -4504,10 +4509,8 @@ static int mem_cgroup_move_account(struct page *page,
 	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
 	}
 
 	/*
@@ -4519,18 +4522,16 @@ static int mem_cgroup_move_account(struct page *page,
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping_cap_account_dirty(mapping)) {
-			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
-			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
 		}
 	}
 
 	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
 	}
 
 	/*
@@ -5190,9 +5191,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	tree_events(memcg, events);
 
 	seq_printf(m, "anon %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
+		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
 		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
 	seq_printf(m, "slab %llu\n",
@@ -5202,13 +5203,13 @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
 	seq_printf(m, "shmem %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_SHMEM] * PAGE_SIZE);
+		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
+		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
 		struct mem_cgroup *mi;
@@ -5231,11 +5232,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
-		   stat[MEMCG_WORKINGSET_REFAULT]);
+		   stat[WORKINGSET_REFAULT]);
 	seq_printf(m, "workingset_activate %lu\n",
-		   stat[MEMCG_WORKINGSET_ACTIVATE]);
+		   stat[WORKINGSET_ACTIVATE]);
 	seq_printf(m, "workingset_nodereclaim %lu\n",
-		   stat[MEMCG_WORKINGSET_NODERECLAIM]);
+		   stat[WORKINGSET_NODERECLAIM]);
 
 	return 0;
 }
@@ -5492,10 +5493,10 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
+	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
 	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 33df0583edb9..777711203809 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2427,7 +2427,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		inode_attach_wb(inode, page);
 		wb = inode_to_wb(inode);
 
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_node_page_state(page, NR_DIRTIED);
@@ -2449,7 +2449,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct bdi_writeback *wb)
 {
 	if (mapping_cap_account_dirty(mapping)) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
 		dec_node_page_state(page, NR_FILE_DIRTY);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2706,7 +2706,7 @@ int clear_page_dirty_for_io(struct page *page)
 		 */
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
-			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+			mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
 			dec_node_page_state(page, NR_FILE_DIRTY);
 			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2753,7 +2753,7 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
@@ -2808,7 +2808,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret) {
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
 		inc_node_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index a69a2a70d057..f6bbfcf01422 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1158,7 +1158,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 			goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
+	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1198,7 +1198,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
+	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 18731310ca36..d77c97552ed3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2046,8 +2046,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
 	if (memcg)
-		refaults = mem_cgroup_read_stat(memcg,
-						MEMCG_WORKINGSET_ACTIVATE);
+		refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
 	else
 		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
@@ -2742,7 +2741,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
 
 		if (memcg)
 			refaults = mem_cgroup_read_stat(memcg,
-						MEMCG_WORKINGSET_ACTIVATE);
+							WORKINGSET_ACTIVATE);
 		else
 			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 51c6f61d4cea..37fc1057cd86 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -289,11 +289,11 @@ bool workingset_refault(void *shadow)
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_node_state(pgdat, WORKINGSET_REFAULT);
-	mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_REFAULT);
+	mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
 
 	if (refault_distance <= active_file) {
 		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
-		mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_ACTIVATE);
+		mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
 		rcu_read_unlock();
 		return true;
 	}
@@ -475,8 +475,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->exceptional))
 		goto out_invalid;
 	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
-	mem_cgroup_inc_page_stat(virt_to_page(node),
-				 MEMCG_WORKINGSET_NODERECLAIM);
+	mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
 	__radix_tree_delete_node(&mapping->page_tree, node,
 				 workingset_update_node, mapping);
 
-- 
2.12.1


* [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg
  2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
  2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
  2017-04-04 22:01 ` [PATCH 3/4] mm: memcontrol: re-use node VM page state enum Johannes Weiner
@ 2017-04-04 22:01 ` Johannes Weiner
  2017-04-06  9:01   ` Vladimir Davydov
  2017-04-07 12:54   ` Michal Hocko
  2017-04-06  8:31 ` [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Vladimir Davydov
  2017-04-07 12:20 ` Michal Hocko
  4 siblings, 2 replies; 14+ messages in thread
From: Johannes Weiner @ 2017-04-04 22:01 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

The memory controller's stat function names are awkwardly long and
arbitrarily different from the zone and node stat functions.

The current interface is named:

  mem_cgroup_read_stat()
  mem_cgroup_update_stat()
  mem_cgroup_inc_stat()
  mem_cgroup_dec_stat()
  mem_cgroup_update_page_stat()
  mem_cgroup_inc_page_stat()
  mem_cgroup_dec_page_stat()

This patch renames it to match the corresponding node stat functions:

  memcg_page_state()		[node_page_state()]
  mod_memcg_state()		[mod_node_state()]
  inc_memcg_state()		[inc_node_state()]
  dec_memcg_state()		[dec_node_state()]
  mod_memcg_page_state()	[mod_node_page_state()]
  inc_memcg_page_state()	[inc_node_page_state()]
  dec_memcg_page_state()	[dec_node_page_state()]
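
For example, call sites from the diff below now read:

  *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);	/* was mem_cgroup_read_stat() */
  inc_memcg_page_state(page, NR_FILE_DIRTY);		/* was mem_cgroup_inc_page_stat() */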

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/memcontrol.h | 73 +++++++++++++++++++++++-----------------------
 mm/memcontrol.c            | 38 ++++++++++++------------
 mm/page-writeback.c        | 10 +++----
 mm/rmap.c                  |  4 +--
 mm/vmscan.c                |  5 ++--
 mm/workingset.c            |  6 ++--
 6 files changed, 68 insertions(+), 68 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0fa1f5de6841..899949bbb2f9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -472,8 +472,8 @@ extern int do_swap_account;
 void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-						 enum memcg_stat_item idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+					     enum memcg_stat_item idx)
 {
 	long val = 0;
 	int cpu;
@@ -487,27 +487,27 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 	return val;
 }
 
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-					  enum memcg_stat_item idx, int val)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx, int val)
 {
 	if (!mem_cgroup_disabled())
 		this_cpu_add(memcg->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				       enum memcg_stat_item idx)
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
 {
-	mem_cgroup_update_stat(memcg, idx, 1);
+	mod_memcg_state(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				       enum memcg_stat_item idx)
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
 {
-	mem_cgroup_update_stat(memcg, idx, -1);
+	mod_memcg_state(memcg, idx, -1);
 }
 
 /**
- * mem_cgroup_update_page_stat - update page state statistics
+ * mod_memcg_page_state - update page state statistics
  * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
@@ -518,28 +518,28 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
  *
  *   lock_page(page) or lock_page_memcg(page)
  *   if (TestClearPageState(page))
- *     mem_cgroup_update_page_stat(page, state, -1);
+ *     mod_memcg_page_state(page, state, -1);
  *   unlock_page(page) or unlock_page_memcg(page)
  *
  * Kernel pages are an exception to this, since they'll never move.
  */
-static inline void mem_cgroup_update_page_stat(struct page *page,
-				 enum memcg_stat_item idx, int val)
+static inline void mod_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx, int val)
 {
 	if (page->mem_cgroup)
-		mem_cgroup_update_stat(page->mem_cgroup, idx, val);
+		mod_memcg_state(page->mem_cgroup, idx, val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum memcg_stat_item idx)
+static inline void inc_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
 {
-	mem_cgroup_update_page_stat(page, idx, 1);
+	mod_memcg_page_state(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum memcg_stat_item idx)
+static inline void dec_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
 {
-	mem_cgroup_update_page_stat(page, idx, -1);
+	mod_memcg_page_state(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -739,40 +739,41 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-						 enum mem_cgroup_stat_index idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+					     enum memcg_stat_item idx)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-					  enum memcg_stat_item idx, int val)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx,
+				   int nr)
 {
 }
 
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				       enum memcg_stat_item idx)
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				       enum memcg_stat_item idx)
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_update_page_stat(struct page *page,
-					       enum memcg_stat_item idx,
-					       int nr)
+static inline void mod_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx,
+					int nr)
 {
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum memcg_stat_item idx)
+static inline void inc_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum memcg_stat_item idx)
+static inline void dec_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6fe4c7fafbfc..ff73899af61a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -552,8 +552,8 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  * implemented.
  */
 
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-					    enum memcg_event_item event)
+static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
+				      enum memcg_event_item event)
 {
 	unsigned long val = 0;
 	int cpu;
@@ -1180,7 +1180,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
 				continue;
 			pr_cont(" %s:%luKB", memcg1_stat_names[i],
-				K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
+				K(memcg_page_state(iter, memcg1_stats[i])));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -2713,7 +2713,7 @@ static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
 
 	for_each_mem_cgroup_tree(iter, memcg) {
 		for (i = 0; i < MEMCG_NR_STAT; i++)
-			stat[i] += mem_cgroup_read_stat(iter, i);
+			stat[i] += memcg_page_state(iter, i);
 	}
 }
 
@@ -2726,7 +2726,7 @@ static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
 
 	for_each_mem_cgroup_tree(iter, memcg) {
 		for (i = 0; i < MEMCG_NR_EVENTS; i++)
-			events[i] += mem_cgroup_read_events(iter, i);
+			events[i] += memcg_sum_events(iter, i);
 	}
 }
 
@@ -2738,10 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		struct mem_cgroup *iter;
 
 		for_each_mem_cgroup_tree(iter, memcg) {
-			val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
-			val += mem_cgroup_read_stat(iter, MEMCG_RSS);
+			val += memcg_page_state(iter, MEMCG_CACHE);
+			val += memcg_page_state(iter, MEMCG_RSS);
 			if (swap)
-				val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
+				val += memcg_page_state(iter, MEMCG_SWAP);
 		}
 	} else {
 		if (!swap)
@@ -3145,13 +3145,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
-			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+			   memcg_page_state(memcg, memcg1_stats[i]) *
 			   PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
 		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
-			   mem_cgroup_read_events(memcg, memcg1_events[i]));
+			   memcg_sum_events(memcg, memcg1_events[i]));
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3175,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+			val += memcg_page_state(mi, memcg1_stats[i]) *
 			PAGE_SIZE;
 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
 	}
@@ -3184,7 +3184,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		unsigned long long val = 0;
 
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_events(mi, memcg1_events[i]);
+			val += memcg_sum_events(mi, memcg1_events[i]);
 		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
 	}
 
@@ -3650,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
 	struct mem_cgroup *parent;
 
-	*pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
+	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
 
 	/* this should eventually include NR_UNSTABLE_NFS */
-	*pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
+	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
 						     (1 << LRU_ACTIVE_FILE));
 	*pheadroom = PAGE_COUNTER_MAX;
@@ -4515,7 +4515,7 @@ static int mem_cgroup_move_account(struct page *page,
 
 	/*
 	 * move_lock grabbed above and caller set from->moving_account, so
-	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
+	 * mod_memcg_page_state will serialize updates to PageDirty.
 	 * So mapping should be stable for dirty pages.
 	 */
 	if (!anon && PageDirty(page)) {
@@ -5161,10 +5161,10 @@ static int memory_events_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
-	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
-	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
-	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
-	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
+	seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
+	seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
+	seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
+	seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
 
 	return 0;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 777711203809..2359608d2568 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2427,7 +2427,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		inode_attach_wb(inode, page);
 		wb = inode_to_wb(inode);
 
-		mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
+		inc_memcg_page_state(page, NR_FILE_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_node_page_state(page, NR_DIRTIED);
@@ -2449,7 +2449,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct bdi_writeback *wb)
 {
 	if (mapping_cap_account_dirty(mapping)) {
-		mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+		dec_memcg_page_state(page, NR_FILE_DIRTY);
 		dec_node_page_state(page, NR_FILE_DIRTY);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2706,7 +2706,7 @@ int clear_page_dirty_for_io(struct page *page)
 		 */
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
-			mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+			dec_memcg_page_state(page, NR_FILE_DIRTY);
 			dec_node_page_state(page, NR_FILE_DIRTY);
 			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2753,7 +2753,7 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
+		dec_memcg_page_state(page, NR_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
@@ -2808,7 +2808,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret) {
-		mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
+		inc_memcg_page_state(page, NR_WRITEBACK);
 		inc_node_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index f6bbfcf01422..e116a8e80468 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1158,7 +1158,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 			goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
+	mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1198,7 +1198,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
+	mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d77c97552ed3..eac4a9a73ba9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2046,7 +2046,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
 	if (memcg)
-		refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
+		refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
 	else
 		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
@@ -2740,8 +2740,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
 		struct lruvec *lruvec;
 
 		if (memcg)
-			refaults = mem_cgroup_read_stat(memcg,
-							WORKINGSET_ACTIVATE);
+			refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
 		else
 			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 37fc1057cd86..b8c9ab678479 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -289,11 +289,11 @@ bool workingset_refault(void *shadow)
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_node_state(pgdat, WORKINGSET_REFAULT);
-	mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
+	inc_memcg_state(memcg, WORKINGSET_REFAULT);
 
 	if (refault_distance <= active_file) {
 		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
-		mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
+		inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
 		rcu_read_unlock();
 		return true;
 	}
@@ -475,7 +475,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->exceptional))
 		goto out_invalid;
 	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
-	mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
+	inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
 	__radix_tree_delete_node(&mapping->page_tree, node,
 				 workingset_update_node, mapping);
 
-- 
2.12.1


* Re: [PATCH 1/4] mm: memcontrol: clean up memory.events counting function
  2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
                   ` (2 preceding siblings ...)
  2017-04-04 22:01 ` [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg Johannes Weiner
@ 2017-04-06  8:31 ` Vladimir Davydov
  2017-04-07 12:20 ` Michal Hocko
  4 siblings, 0 replies; 14+ messages in thread
From: Vladimir Davydov @ 2017-04-06  8:31 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue, Apr 04, 2017 at 06:01:45PM -0400, Johannes Weiner wrote:
> We only ever count single events, so drop the @nr parameter. Rename the
> function accordingly. Remove the low-information kerneldoc.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>


* Re: [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
@ 2017-04-06  8:49   ` Vladimir Davydov
  2017-04-10 14:17     ` Johannes Weiner
  2017-04-07 12:47   ` Michal Hocko
  1 sibling, 1 reply; 14+ messages in thread
From: Vladimir Davydov @ 2017-04-06  8:49 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue, Apr 04, 2017 at 06:01:46PM -0400, Johannes Weiner wrote:
> The current duplication is a high-maintenance mess, and it's painful
> to add new items.
> 
> This increases the size of the event array, but we'll eventually want
> most of the VM events tracked on a per-cgroup basis anyway.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Although the increase in the size of the mem_cgroup struct introduced by
this patch looks scary, I agree this is a reasonable step toward the
unification of vmstat, as most vm_event_item entries make sense to
account per cgroup as well.

Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>

> @@ -608,9 +601,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
>  
>  	/* pagein of a big page is an event. So, ignore page size */
>  	if (nr_pages > 0)
> -		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
> +		__this_cpu_inc(memcg->stat->events[PGPGIN]);
>  	else {
> -		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
> +		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
>  		nr_pages = -nr_pages; /* for event */
>  	}

AFAIR this doesn't exactly match the system-wide PGPGIN/PGPGOUT: those
are supposed to account only paging events that involve IO, while the
memcg counters currently also include faulting in zero pages and zapping
of a process address space. Probably this should be revised before
rolling out to cgroup v2.


* Re: [PATCH 3/4] mm: memcontrol: re-use node VM page state enum
  2017-04-04 22:01 ` [PATCH 3/4] mm: memcontrol: re-use node VM page state enum Johannes Weiner
@ 2017-04-06  8:59   ` Vladimir Davydov
  0 siblings, 0 replies; 14+ messages in thread
From: Vladimir Davydov @ 2017-04-06  8:59 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue, Apr 04, 2017 at 06:01:47PM -0400, Johannes Weiner wrote:
> The current duplication is a high-maintenance mess, and it's painful
> to add new items or query memcg state from the rest of the VM.
> 
> This increases the size of the stat array marginally, but we should
> aim to track all these stats on a per-cgroup level anyway.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>


* Re: [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg
  2017-04-04 22:01 ` [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg Johannes Weiner
@ 2017-04-06  9:01   ` Vladimir Davydov
  2017-04-07 12:54   ` Michal Hocko
  1 sibling, 0 replies; 14+ messages in thread
From: Vladimir Davydov @ 2017-04-06  9:01 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue, Apr 04, 2017 at 06:01:48PM -0400, Johannes Weiner wrote:
> The memory controller's stat function names are awkwardly long and
> arbitrarily different from the zone and node stat functions.
> 
> The current interface is named:
> 
>   mem_cgroup_read_stat()
>   mem_cgroup_update_stat()
>   mem_cgroup_inc_stat()
>   mem_cgroup_dec_stat()
>   mem_cgroup_update_page_stat()
>   mem_cgroup_inc_page_stat()
>   mem_cgroup_dec_page_stat()
> 
> This patch renames it to match the corresponding node stat functions:
> 
>   memcg_page_state()		[node_page_state()]
>   mod_memcg_state()		[mod_node_state()]
>   inc_memcg_state()		[inc_node_state()]
>   dec_memcg_state()		[dec_node_state()]
>   mod_memcg_page_state()	[mod_node_page_state()]
>   inc_memcg_page_state()	[inc_node_page_state()]
>   dec_memcg_page_state()	[dec_node_page_state()]
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Looks neat.

Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>


* Re: [PATCH 1/4] mm: memcontrol: clean up memory.events counting function
  2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
                   ` (3 preceding siblings ...)
  2017-04-06  8:31 ` [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Vladimir Davydov
@ 2017-04-07 12:20 ` Michal Hocko
  4 siblings, 0 replies; 14+ messages in thread
From: Michal Hocko @ 2017-04-07 12:20 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue 04-04-17 18:01:45, Johannes Weiner wrote:
> We only ever count single events, drop the @nr parameter. Rename the
> function accordingly. Remove low-information kerneldoc.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  include/linux/memcontrol.h | 18 +++++-------------
>  mm/memcontrol.c            |  8 ++++----
>  mm/vmscan.c                |  2 +-
>  3 files changed, 10 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index cfa91a3ca0ca..bc0c16e284c0 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -287,17 +287,10 @@ static inline bool mem_cgroup_disabled(void)
>  	return !cgroup_subsys_enabled(memory_cgrp_subsys);
>  }
>  
> -/**
> - * mem_cgroup_events - count memory events against a cgroup
> - * @memcg: the memory cgroup
> - * @idx: the event index
> - * @nr: the number of events to account for
> - */
> -static inline void mem_cgroup_events(struct mem_cgroup *memcg,
> -		       enum mem_cgroup_events_index idx,
> -		       unsigned int nr)
> +static inline void mem_cgroup_event(struct mem_cgroup *memcg,
> +				    enum mem_cgroup_events_index idx)
>  {
> -	this_cpu_add(memcg->stat->events[idx], nr);
> +	this_cpu_inc(memcg->stat->events[idx]);
>  	cgroup_file_notify(&memcg->events_file);
>  }
>  
> @@ -614,9 +607,8 @@ static inline bool mem_cgroup_disabled(void)
>  	return true;
>  }
>  
> -static inline void mem_cgroup_events(struct mem_cgroup *memcg,
> -				     enum mem_cgroup_events_index idx,
> -				     unsigned int nr)
> +static inline void mem_cgroup_event(struct mem_cgroup *memcg,
> +				    enum mem_cgroup_events_index idx)
>  {
>  }
>  
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 108d5b097db1..1ffa3ad201ea 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1825,7 +1825,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
>  	do {
>  		if (page_counter_read(&memcg->memory) <= memcg->high)
>  			continue;
> -		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
> +		mem_cgroup_event(memcg, MEMCG_HIGH);
>  		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
>  	} while ((memcg = parent_mem_cgroup(memcg)));
>  }
> @@ -1916,7 +1916,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	if (!gfpflags_allow_blocking(gfp_mask))
>  		goto nomem;
>  
> -	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
> +	mem_cgroup_event(mem_over_limit, MEMCG_MAX);
>  
>  	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
>  						    gfp_mask, may_swap);
> @@ -1959,7 +1959,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	if (fatal_signal_pending(current))
>  		goto force;
>  
> -	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
> +	mem_cgroup_event(mem_over_limit, MEMCG_OOM);
>  
>  	mem_cgroup_oom(mem_over_limit, gfp_mask,
>  		       get_order(nr_pages * PAGE_SIZE));
> @@ -5142,7 +5142,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
>  			continue;
>  		}
>  
> -		mem_cgroup_events(memcg, MEMCG_OOM, 1);
> +		mem_cgroup_event(memcg, MEMCG_OOM);
>  		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
>  			break;
>  	}
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index b3f62cf37097..18731310ca36 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2526,7 +2526,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
>  					sc->memcg_low_skipped = 1;
>  					continue;
>  				}
> -				mem_cgroup_events(memcg, MEMCG_LOW, 1);
> +				mem_cgroup_event(memcg, MEMCG_LOW);
>  			}
>  
>  			reclaimed = sc->nr_reclaimed;
> -- 
> 2.12.1
> 

-- 
Michal Hocko
SUSE Labs


* Re: [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
  2017-04-06  8:49   ` Vladimir Davydov
@ 2017-04-07 12:47   ` Michal Hocko
  2017-04-10 14:13     ` Johannes Weiner
  1 sibling, 1 reply; 14+ messages in thread
From: Michal Hocko @ 2017-04-07 12:47 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

I do agree that we should share global and memcg-specific event
constants, but I am not sure we want to share all of them. Would it make
sense to reorganize the global enum and put the shared ones at the
beginning? We wouldn't need the memcg-specific translation then.
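
Something like the following, I mean -- just a sketch, the names are
made up:

	enum vm_event_item {
		/* events accounted both globally and per memcg */
		PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT,
		NR_SHARED_VM_EVENT_ITEMS,
		/* global-only events continue from here */
		PSWPIN = NR_SHARED_VM_EVENT_ITEMS,
		PSWPOUT,
		/* ... */
		NR_VM_EVENT_ITEMS
	};

The memcg events array could then be sized by NR_SHARED_VM_EVENT_ITEMS
plus the MEMCG_* items instead of all of NR_VM_EVENT_ITEMS.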

Anyway, two comments on the current implementation.

On Tue 04-04-17 18:01:46, Johannes Weiner wrote:
[...]
> +/* Cgroup-specific events, on top of universal VM events */
> +enum memcg_event_item {
> +	MEMCG_LOW = NR_VM_EVENT_ITEMS,
> +	MEMCG_HIGH,
> +	MEMCG_MAX,
> +	MEMCG_OOM,
> +	MEMCG_NR_EVENTS,
> +};

The above should mention that each supported global VM event should
provide the corresponding translation

[...]

here...
> +/* Universal VM events cgroup1 shows, original sort order */
> +unsigned int memcg1_events[] = {
> +	PGPGIN,
> +	PGPGOUT,
> +	PGFAULT,
> +	PGMAJFAULT,
> +};
> +
> +static const char *const memcg1_event_names[] = {
> +	"pgpgin",
> +	"pgpgout",
> +	"pgfault",
> +	"pgmajfault",
> +};

the naming doesn't make it easier to understand why we need this.
global2memcg_event?

-- 
Michal Hocko
SUSE Labs


* Re: [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg
  2017-04-04 22:01 ` [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg Johannes Weiner
  2017-04-06  9:01   ` Vladimir Davydov
@ 2017-04-07 12:54   ` Michal Hocko
  1 sibling, 0 replies; 14+ messages in thread
From: Michal Hocko @ 2017-04-07 12:54 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

On Tue 04-04-17 18:01:48, Johannes Weiner wrote:
> The memory controller's stat function names are awkwardly long and
> arbitrarily different from the zone and node stat functions.
> 
> The current interface is named:
> 
>   mem_cgroup_read_stat()
>   mem_cgroup_update_stat()
>   mem_cgroup_inc_stat()
>   mem_cgroup_dec_stat()
>   mem_cgroup_update_page_stat()
>   mem_cgroup_inc_page_stat()
>   mem_cgroup_dec_page_stat()
> 
> This patch renames it to match the corresponding node stat functions:
> 
>   memcg_page_state()		[node_page_state()]
>   mod_memcg_state()		[mod_node_state()]
>   inc_memcg_state()		[inc_node_state()]
>   dec_memcg_state()		[dec_node_state()]
>   mod_memcg_page_state()	[mod_node_page_state()]
>   inc_memcg_page_state()	[inc_node_page_state()]
>   dec_memcg_page_state()	[dec_node_page_state()]

Yes, this is definitely much better!

> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  include/linux/memcontrol.h | 73 +++++++++++++++++++++++-----------------------
>  mm/memcontrol.c            | 38 ++++++++++++------------
>  mm/page-writeback.c        | 10 +++----
>  mm/rmap.c                  |  4 +--
>  mm/vmscan.c                |  5 ++--
>  mm/workingset.c            |  6 ++--
>  6 files changed, 68 insertions(+), 68 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 0fa1f5de6841..899949bbb2f9 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -472,8 +472,8 @@ extern int do_swap_account;
>  void lock_page_memcg(struct page *page);
>  void unlock_page_memcg(struct page *page);
>  
> -static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
> -						 enum memcg_stat_item idx)
> +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
> +					     enum memcg_stat_item idx)
>  {
>  	long val = 0;
>  	int cpu;
> @@ -487,27 +487,27 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
>  	return val;
>  }
>  
> -static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
> -					  enum memcg_stat_item idx, int val)
> +static inline void mod_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx, int val)
>  {
>  	if (!mem_cgroup_disabled())
>  		this_cpu_add(memcg->stat->count[idx], val);
>  }
>  
> -static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
> -				       enum memcg_stat_item idx)
> +static inline void inc_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx)
>  {
> -	mem_cgroup_update_stat(memcg, idx, 1);
> +	mod_memcg_state(memcg, idx, 1);
>  }
>  
> -static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
> -				       enum memcg_stat_item idx)
> +static inline void dec_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx)
>  {
> -	mem_cgroup_update_stat(memcg, idx, -1);
> +	mod_memcg_state(memcg, idx, -1);
>  }
>  
>  /**
> - * mem_cgroup_update_page_stat - update page state statistics
> + * mod_memcg_page_state - update page state statistics
>   * @page: the page
>   * @idx: page state item to account
>   * @val: number of pages (positive or negative)
> @@ -518,28 +518,28 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
>   *
>   *   lock_page(page) or lock_page_memcg(page)
>   *   if (TestClearPageState(page))
> - *     mem_cgroup_update_page_stat(page, state, -1);
> + *     mod_memcg_page_state(page, state, -1);
>   *   unlock_page(page) or unlock_page_memcg(page)
>   *
>   * Kernel pages are an exception to this, since they'll never move.
>   */
> -static inline void mem_cgroup_update_page_stat(struct page *page,
> -				 enum memcg_stat_item idx, int val)
> +static inline void mod_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx, int val)
>  {
>  	if (page->mem_cgroup)
> -		mem_cgroup_update_stat(page->mem_cgroup, idx, val);
> +		mod_memcg_state(page->mem_cgroup, idx, val);
>  }
>  
> -static inline void mem_cgroup_inc_page_stat(struct page *page,
> -					    enum memcg_stat_item idx)
> +static inline void inc_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx)
>  {
> -	mem_cgroup_update_page_stat(page, idx, 1);
> +	mod_memcg_page_state(page, idx, 1);
>  }
>  
> -static inline void mem_cgroup_dec_page_stat(struct page *page,
> -					    enum memcg_stat_item idx)
> +static inline void dec_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx)
>  {
> -	mem_cgroup_update_page_stat(page, idx, -1);
> +	mod_memcg_page_state(page, idx, -1);
>  }
>  
>  unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
> @@ -739,40 +739,41 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
>  	return false;
>  }
>  
> -static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
> -						 enum mem_cgroup_stat_index idx)
> +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
> +					     enum memcg_stat_item idx)
>  {
>  	return 0;
>  }
>  
> -static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
> -					  enum memcg_stat_item idx, int val)
> +static inline void mod_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx,
> +				   int nr)
>  {
>  }
>  
> -static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
> -				       enum memcg_stat_item idx)
> +static inline void inc_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx)
>  {
>  }
>  
> -static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
> -				       enum memcg_stat_item idx)
> +static inline void dec_memcg_state(struct mem_cgroup *memcg,
> +				   enum memcg_stat_item idx)
>  {
>  }
>  
> -static inline void mem_cgroup_update_page_stat(struct page *page,
> -					       enum memcg_stat_item idx,
> -					       int nr)
> +static inline void mod_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx,
> +					int nr)
>  {
>  }
>  
> -static inline void mem_cgroup_inc_page_stat(struct page *page,
> -					    enum memcg_stat_item idx)
> +static inline void inc_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx)
>  {
>  }
>  
> -static inline void mem_cgroup_dec_page_stat(struct page *page,
> -					    enum memcg_stat_item idx)
> +static inline void dec_memcg_page_state(struct page *page,
> +					enum memcg_stat_item idx)
>  {
>  }
>  
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 6fe4c7fafbfc..ff73899af61a 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -552,8 +552,8 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
>   * implemented.
>   */
>  
> -static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
> -					    enum memcg_event_item event)
> +static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
> +				      enum memcg_event_item event)
>  {
>  	unsigned long val = 0;
>  	int cpu;
> @@ -1180,7 +1180,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
>  			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
>  				continue;
>  			pr_cont(" %s:%luKB", memcg1_stat_names[i],
> -				K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
> +				K(memcg_page_state(iter, memcg1_stats[i])));
>  		}
>  
>  		for (i = 0; i < NR_LRU_LISTS; i++)
> @@ -2713,7 +2713,7 @@ static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
>  
>  	for_each_mem_cgroup_tree(iter, memcg) {
>  		for (i = 0; i < MEMCG_NR_STAT; i++)
> -			stat[i] += mem_cgroup_read_stat(iter, i);
> +			stat[i] += memcg_page_state(iter, i);
>  	}
>  }
>  
> @@ -2726,7 +2726,7 @@ static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
>  
>  	for_each_mem_cgroup_tree(iter, memcg) {
>  		for (i = 0; i < MEMCG_NR_EVENTS; i++)
> -			events[i] += mem_cgroup_read_events(iter, i);
> +			events[i] += memcg_sum_events(iter, i);
>  	}
>  }
>  
> @@ -2738,10 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
>  		struct mem_cgroup *iter;
>  
>  		for_each_mem_cgroup_tree(iter, memcg) {
> -			val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
> -			val += mem_cgroup_read_stat(iter, MEMCG_RSS);
> +			val += memcg_page_state(iter, MEMCG_CACHE);
> +			val += memcg_page_state(iter, MEMCG_RSS);
>  			if (swap)
> -				val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
> +				val += memcg_page_state(iter, MEMCG_SWAP);
>  		}
>  	} else {
>  		if (!swap)
> @@ -3145,13 +3145,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
>  			continue;
>  		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
> -			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
> +			   memcg_page_state(memcg, memcg1_stats[i]) *
>  			   PAGE_SIZE);
>  	}
>  
>  	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
>  		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
> -			   mem_cgroup_read_events(memcg, memcg1_events[i]));
> +			   memcg_sum_events(memcg, memcg1_events[i]));
>  
>  	for (i = 0; i < NR_LRU_LISTS; i++)
>  		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
> @@ -3175,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
>  			continue;
>  		for_each_mem_cgroup_tree(mi, memcg)
> -			val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
> +			val += memcg_page_state(mi, memcg1_stats[i]) *
>  			PAGE_SIZE;
>  		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
>  	}
> @@ -3184,7 +3184,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  		unsigned long long val = 0;
>  
>  		for_each_mem_cgroup_tree(mi, memcg)
> -			val += mem_cgroup_read_events(mi, memcg1_events[i]);
> +			val += memcg_sum_events(mi, memcg1_events[i]);
>  		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
>  	}
>  
> @@ -3650,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
>  	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
>  	struct mem_cgroup *parent;
>  
> -	*pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
> +	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
>  
>  	/* this should eventually include NR_UNSTABLE_NFS */
> -	*pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
> +	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
>  	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
>  						     (1 << LRU_ACTIVE_FILE));
>  	*pheadroom = PAGE_COUNTER_MAX;
> @@ -4515,7 +4515,7 @@ static int mem_cgroup_move_account(struct page *page,
>  
>  	/*
>  	 * move_lock grabbed above and caller set from->moving_account, so
> -	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
> +	 * mod_memcg_page_state will serialize updates to PageDirty.
>  	 * So mapping should be stable for dirty pages.
>  	 */
>  	if (!anon && PageDirty(page)) {
> @@ -5161,10 +5161,10 @@ static int memory_events_show(struct seq_file *m, void *v)
>  {
>  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
>  
> -	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
> -	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
> -	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
> -	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
> +	seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
> +	seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
> +	seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
> +	seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
>  
>  	return 0;
>  }
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index 777711203809..2359608d2568 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -2427,7 +2427,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
>  		inode_attach_wb(inode, page);
>  		wb = inode_to_wb(inode);
>  
> -		mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
> +		inc_memcg_page_state(page, NR_FILE_DIRTY);
>  		__inc_node_page_state(page, NR_FILE_DIRTY);
>  		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
>  		__inc_node_page_state(page, NR_DIRTIED);
> @@ -2449,7 +2449,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
>  			  struct bdi_writeback *wb)
>  {
>  	if (mapping_cap_account_dirty(mapping)) {
> -		mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
> +		dec_memcg_page_state(page, NR_FILE_DIRTY);
>  		dec_node_page_state(page, NR_FILE_DIRTY);
>  		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
>  		dec_wb_stat(wb, WB_RECLAIMABLE);
> @@ -2706,7 +2706,7 @@ int clear_page_dirty_for_io(struct page *page)
>  		 */
>  		wb = unlocked_inode_to_wb_begin(inode, &locked);
>  		if (TestClearPageDirty(page)) {
> -			mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
> +			dec_memcg_page_state(page, NR_FILE_DIRTY);
>  			dec_node_page_state(page, NR_FILE_DIRTY);
>  			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
>  			dec_wb_stat(wb, WB_RECLAIMABLE);
> @@ -2753,7 +2753,7 @@ int test_clear_page_writeback(struct page *page)
>  		ret = TestClearPageWriteback(page);
>  	}
>  	if (ret) {
> -		mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
> +		dec_memcg_page_state(page, NR_WRITEBACK);
>  		dec_node_page_state(page, NR_WRITEBACK);
>  		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
>  		inc_node_page_state(page, NR_WRITTEN);
> @@ -2808,7 +2808,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
>  		ret = TestSetPageWriteback(page);
>  	}
>  	if (!ret) {
> -		mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
> +		inc_memcg_page_state(page, NR_WRITEBACK);
>  		inc_node_page_state(page, NR_WRITEBACK);
>  		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
>  	}
> diff --git a/mm/rmap.c b/mm/rmap.c
> index f6bbfcf01422..e116a8e80468 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1158,7 +1158,7 @@ void page_add_file_rmap(struct page *page, bool compound)
>  			goto out;
>  	}
>  	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
> -	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
> +	mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
>  out:
>  	unlock_page_memcg(page);
>  }
> @@ -1198,7 +1198,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
>  	 * pte lock(a spinlock) is held, which implies preemption disabled.
>  	 */
>  	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
> -	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
> +	mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
>  
>  	if (unlikely(PageMlocked(page)))
>  		clear_page_mlock(page);
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d77c97552ed3..eac4a9a73ba9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2046,7 +2046,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
>  	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
>  
>  	if (memcg)
> -		refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
> +		refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
>  	else
>  		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
>  
> @@ -2740,8 +2740,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
>  		struct lruvec *lruvec;
>  
>  		if (memcg)
> -			refaults = mem_cgroup_read_stat(memcg,
> -							WORKINGSET_ACTIVATE);
> +			refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
>  		else
>  			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
>  
> diff --git a/mm/workingset.c b/mm/workingset.c
> index 37fc1057cd86..b8c9ab678479 100644
> --- a/mm/workingset.c
> +++ b/mm/workingset.c
> @@ -289,11 +289,11 @@ bool workingset_refault(void *shadow)
>  	refault_distance = (refault - eviction) & EVICTION_MASK;
>  
>  	inc_node_state(pgdat, WORKINGSET_REFAULT);
> -	mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
> +	inc_memcg_state(memcg, WORKINGSET_REFAULT);
>  
>  	if (refault_distance <= active_file) {
>  		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
> -		mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
> +		inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
>  		rcu_read_unlock();
>  		return true;
>  	}
> @@ -475,7 +475,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
>  	if (WARN_ON_ONCE(node->exceptional))
>  		goto out_invalid;
>  	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
> -	mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
> +	inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
>  	__radix_tree_delete_node(&mapping->page_tree, node,
>  				 workingset_update_node, mapping);
>  
> -- 
> 2.12.1
> 

-- 
Michal Hocko
SUSE Labs


* Re: [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-07 12:47   ` Michal Hocko
@ 2017-04-10 14:13     ` Johannes Weiner
  2017-04-11 12:30       ` Michal Hocko
  0 siblings, 1 reply; 14+ messages in thread
From: Johannes Weiner @ 2017-04-10 14:13 UTC (permalink / raw)
  To: Michal Hocko
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

On Fri, Apr 07, 2017 at 02:47:02PM +0200, Michal Hocko wrote:
> I do agree that we should share global and memcg-specific event
> constants, but I am not sure we want to share all of them. Would it make
> sense to reorganize the global enum and put the shared ones at the
> beginning? We wouldn't need the memcg-specific translation then.

I'm not sure I follow. Which translation?

> Anyway, two comments on the current implementation.
> 
> On Tue 04-04-17 18:01:46, Johannes Weiner wrote:
> [...]
> > +/* Cgroup-specific events, on top of universal VM events */
> > +enum memcg_event_item {
> > +	MEMCG_LOW = NR_VM_EVENT_ITEMS,
> > +	MEMCG_HIGH,
> > +	MEMCG_MAX,
> > +	MEMCG_OOM,
> > +	MEMCG_NR_EVENTS,
> > +};
> 
> The above should mention that each supported global VM event should
> provide the corresponding translation
> 
> [...]
> 
> here...
> > +/* Universal VM events cgroup1 shows, original sort order */
> > +unsigned int memcg1_events[] = {
> > +	PGPGIN,
> > +	PGPGOUT,
> > +	PGFAULT,
> > +	PGMAJFAULT,
> > +};
> > +
> > +static const char *const memcg1_event_names[] = {
> > +	"pgpgin",
> > +	"pgpgout",
> > +	"pgfault",
> > +	"pgmajfault",
> > +};
> 
> the naming doesn't make it easier to understand why we need this.
> global2memcg_event?

This is just to keep the file order consistent. It could have been
done like memory.stat in cgroup2, where we simply do

   seq_printf(s, "pgmajfault %lu\n", stat[PGMAJFAULT]);

but I didn't want to change the v1 code too much. So these two arrays
are just a sorted list of global VM events shown in v1's memory.stat.
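
That is, memcg_stat_show() just walks the two arrays in lockstep (using
the names from patch 4/4):

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
			   memcg_sum_events(memcg, memcg1_events[i]));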


* Re: [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-06  8:49   ` Vladimir Davydov
@ 2017-04-10 14:17     ` Johannes Weiner
  0 siblings, 0 replies; 14+ messages in thread
From: Johannes Weiner @ 2017-04-10 14:17 UTC (permalink / raw)
  To: Vladimir Davydov
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel,
	kernel-team

On Thu, Apr 06, 2017 at 11:49:24AM +0300, Vladimir Davydov wrote:
> On Tue, Apr 04, 2017 at 06:01:46PM -0400, Johannes Weiner wrote:
> > The current duplication is a high-maintenance mess, and it's painful
> > to add new items.
> > 
> > This increases the size of the event array, but we'll eventually want
> > most of the VM events tracked on a per-cgroup basis anyway.
> > 
> > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> 
> Although the increase in the mem_cgroup struct introduced by this patch
> looks scary, I agree this is a reasonable step toward unification of
> vmstat, as most vm_event_item entries do make sense to be accounted per
> cgroup as well.
> 
> Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>

Thanks!

> > @@ -608,9 +601,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
> >  
> >  	/* pagein of a big page is an event. So, ignore page size */
> >  	if (nr_pages > 0)
> > -		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
> > +		__this_cpu_inc(memcg->stat->events[PGPGIN]);
> >  	else {
> > -		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
> > +		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
> >  		nr_pages = -nr_pages; /* for event */
> >  	}
> 
> AFAIR this doesn't exactly match the system-wide PGPGIN/PGPGOUT: those
> are supposed to account only paging events that involve IO, whereas the
> memcg counters here also include faulting in zero pages and zapping a
> process address space. Probably, this should be revised before rolling
> out to cgroup v2.

Yeah, that stat item doesn't make much sense in cgroup1. Cgroup2
doesn't export it at all right now. Should we export it in the future,
we can add special MEMCG1_PGPGIN_SILLY events and let cgroup2 use the
real thing. Or remove/fix the stats in cgroup1, because I cannot see
how this would be useful at all right now, anyway.

But it's ooold behavior, so no urgency to tackle this right now.


* Re: [PATCH 2/4] mm: memcontrol: re-use global VM event enum
  2017-04-10 14:13     ` Johannes Weiner
@ 2017-04-11 12:30       ` Michal Hocko
  0 siblings, 0 replies; 14+ messages in thread
From: Michal Hocko @ 2017-04-11 12:30 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel,
	kernel-team

On Mon 10-04-17 10:13:34, Johannes Weiner wrote:
> On Fri, Apr 07, 2017 at 02:47:02PM +0200, Michal Hocko wrote:
> > I do agree that we should share global and memcg-specific event
> > constants, but I am not sure we want to share all of them. Would it make
> > sense to reorganize the global enum and put the shared ones at the
> > beginning? We wouldn't need the memcg-specific translation then.
> 
> I'm not sure I follow. Which translation?

Sorry, I should have said s@translation@filtering@ by memcg1_events*

> > Anyway, two comments on the current implementation.
> > 
> > On Tue 04-04-17 18:01:46, Johannes Weiner wrote:
> > [...]
> > > +/* Cgroup-specific events, on top of universal VM events */
> > > +enum memcg_event_item {
> > > +	MEMCG_LOW = NR_VM_EVENT_ITEMS,
> > > +	MEMCG_HIGH,
> > > +	MEMCG_MAX,
> > > +	MEMCG_OOM,
> > > +	MEMCG_NR_EVENTS,
> > > +};
> > 
> > The above should mention that each supported global VM event should
> > provide the corresponding translation
> > 
> > [...]
> > 
> > here...
> > > +/* Universal VM events cgroup1 shows, original sort order */
> > > +unsigned int memcg1_events[] = {
> > > +	PGPGIN,
> > > +	PGPGOUT,
> > > +	PGFAULT,
> > > +	PGMAJFAULT,
> > > +};
> > > +
> > > +static const char *const memcg1_event_names[] = {
> > > +	"pgpgin",
> > > +	"pgpgout",
> > > +	"pgfault",
> > > +	"pgmajfault",
> > > +};
> > 
> > the naming doesn't make it easier to understand why we need this.
> > global2memcg_event?
> 
> This is just to keep the file order consistent. It could have been
> done like memory.stat in cgroup2, where we simply do
> 
>    seq_printf(s, "pgmajfault %lu\n", stat[PGMAJFAULT]);
> 
> but I didn't want to change the v1 code too much. So these two arrays
> are just a sorted list of global VM events shown in v1's memory.stat.

You would still have to know which parts of the global stats we
account for memcg.
-- 
Michal Hocko
SUSE Labs


end of thread (last update: 2017-04-11 12:30 UTC)

Thread overview: 14+ messages
2017-04-04 22:01 [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Johannes Weiner
2017-04-04 22:01 ` [PATCH 2/4] mm: memcontrol: re-use global VM event enum Johannes Weiner
2017-04-06  8:49   ` Vladimir Davydov
2017-04-10 14:17     ` Johannes Weiner
2017-04-07 12:47   ` Michal Hocko
2017-04-10 14:13     ` Johannes Weiner
2017-04-11 12:30       ` Michal Hocko
2017-04-04 22:01 ` [PATCH 3/4] mm: memcontrol: re-use node VM page state enum Johannes Weiner
2017-04-06  8:59   ` Vladimir Davydov
2017-04-04 22:01 ` [PATCH 4/4] mm: memcontrol: use node page state naming scheme for memcg Johannes Weiner
2017-04-06  9:01   ` Vladimir Davydov
2017-04-07 12:54   ` Michal Hocko
2017-04-06  8:31 ` [PATCH 1/4] mm: memcontrol: clean up memory.events counting function Vladimir Davydov
2017-04-07 12:20 ` Michal Hocko
