* [PATCH] mm/memcontrol.c: make mem_cgroup_read_stat() read all stat items of interest in one go
@ 2014-04-10 17:11 ` Jianyu Zhan
From: Jianyu Zhan @ 2014-04-10 17:11 UTC (permalink / raw)
  To: hannes, mhocko, bsingharora, kamezawa.hiroyu, akpm
  Cc: cgroups, linux-mm, linux-kernel, nasa4836

Currently, mem_cgroup_read_stat() backs the user-visible statistics.
Users who account memory usage per memory cgroup _always_ require exact
values, so we do not use a quick-but-fuzzy read with periodic
synchronization; instead, every read iterates over all cpus.

Both mem_cgroup_usage() and mem_cgroup_recursive_stat() eventually
call into mem_cgroup_read_stat().

However, these *stat snapshot* operations are implemented in a quite
coarse way: each stat item costs M*N iterations (M = nr_memcgs,
N = nr_possible_cpus). There are two deficiencies:

1. for every stat item, we iterate over all the percpu values again,
   which is not cache friendly.
2. for every stat item, we call mem_cgroup_read_stat() once more, which
   increases the probability of contending on pcp_counter_lock.

So, this patch improves this a bit. Concretely, the caller marks every
stat item of interest in a bitmap, and mem_cgroup_read_stat() then reads
them all in one go.
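
For illustration, a caller that wants the cache, rss and (when swap
accounting is enabled) swap counters follows the same pattern as the
converted mem_cgroup_usage() below; roughly (the patch renames the
helper to mem_cgroup_read_stat_vec(), and memcg here stands for
whatever mem_cgroup the caller holds):

    DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
    long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};

    bitmap_zero(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
    set_bit(MEM_CGROUP_STAT_CACHE, stat_bitmask);
    set_bit(MEM_CGROUP_STAT_RSS, stat_bitmask);
    if (do_swap_account)
            set_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);

    /* one pass over the percpu counters accumulates every requested item */
    mem_cgroup_read_stat_vec(memcg, stat_bitmask, stat_vec);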

This is more efficient, and it also brings the result closer to a real *stat snapshot*.

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
---
 mm/memcontrol.c | 91 +++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 62 insertions(+), 29 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29501f0..009357e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -347,7 +347,7 @@ struct mem_cgroup {
 	struct mem_cgroup_stat_cpu __percpu *stat;
 	/*
 	 * used when a cpu is offlined or other synchronizations
-	 * See mem_cgroup_read_stat().
+	 * See mem_cgroup_read_stat_vec().
 	 */
 	struct mem_cgroup_stat_cpu nocpu_base;
 	spinlock_t pcp_counter_lock;
@@ -855,7 +855,13 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 	return mz;
 }
 
-/*
+/**
+ * @memcg: the mem_cgroup whose statistics to read.
+ * @stat_bitmask: a bitmap selecting which stat items to read;
+ *		each mem_cgroup_stat_index has a corresponding bit.
+ * @stat_vec: a vector receiving the stat values; the caller
+ *		shall take care of initializing it.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
@@ -874,22 +880,25 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  * common workload, threashold and synchonization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-				 enum mem_cgroup_stat_index idx)
+static void mem_cgroup_read_stat_vec(struct mem_cgroup *memcg,
+				 unsigned long *stat_bitmask,
+				 long long *stat_vec)
 {
-	long val = 0;
 	int cpu;
+	int i;
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		val += per_cpu(memcg->stat->count[idx], cpu);
+		for_each_set_bit(i, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
+			stat_vec[i] += per_cpu(memcg->stat->count[i], cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
 	spin_lock(&memcg->pcp_counter_lock);
-	val += memcg->nocpu_base.count[idx];
+	for_each_set_bit(i, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
+		stat_vec[i] += memcg->nocpu_base.count[i];
 	spin_unlock(&memcg->pcp_counter_lock);
 #endif
 	put_online_cpus();
-	return val;
 }
 
 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
@@ -1674,6 +1683,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	static DEFINE_MUTEX(oom_info_lock);
 	struct mem_cgroup *iter;
 	unsigned int i;
+	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
 
 	if (!p)
 		return;
@@ -1702,16 +1712,22 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
 
+	bitmap_fill(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
+	if (!do_swap_account)
+		clear_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
 	for_each_mem_cgroup_tree(iter, memcg) {
+		long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
+
 		pr_info("Memory cgroup stats for ");
 		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
+		mem_cgroup_read_stat_vec(iter, stat_bitmask, stat_vec);
 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
-				K(mem_cgroup_read_stat(iter, i)));
+			pr_cont(" %s:%lldKB", mem_cgroup_stat_names[i],
+				K(stat_vec[i]));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -4940,25 +4956,28 @@ out:
 	return retval;
 }
 
-
-static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
-					       enum mem_cgroup_stat_index idx)
+/* Callers must take care of initializing the stat_vec array */
+static void mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+					unsigned long *stat_bitmask,
+					long long *stat_vec)
 {
 	struct mem_cgroup *iter;
-	long val = 0;
+	int idx;
 
 	/* Per-cpu values can be negative, use a signed accumulator */
 	for_each_mem_cgroup_tree(iter, memcg)
-		val += mem_cgroup_read_stat(iter, idx);
+		mem_cgroup_read_stat_vec(iter, stat_bitmask, stat_vec);
 
-	if (val < 0) /* race ? */
-		val = 0;
-	return val;
+	for_each_set_bit(idx, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
+		if (stat_vec[idx] < 0) /* race ? */
+			stat_vec[idx] = 0;
 }
 
 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 {
 	u64 val;
+	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
+	long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
 
 	if (!mem_cgroup_is_root(memcg)) {
 		if (!swap)
@@ -4967,15 +4986,21 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
 	}
 
+
 	/*
 	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
 	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
 	 */
-	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
-	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
-
+	bitmap_zero(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
+	set_bit(MEM_CGROUP_STAT_CACHE, stat_bitmask);
+	set_bit(MEM_CGROUP_STAT_RSS, stat_bitmask);
 	if (swap)
-		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+		set_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
+
+	mem_cgroup_recursive_stat(memcg, stat_bitmask, stat_vec);
+
+	val = stat_vec[MEM_CGROUP_STAT_CACHE] + stat_vec[MEM_CGROUP_STAT_RSS] +
+	      (swap ? stat_vec[MEM_CGROUP_STAT_SWAP] : 0);
 
 	return val << PAGE_SHIFT;
 }
@@ -5349,12 +5374,19 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 	struct mem_cgroup *mi;
 	unsigned int i;
+	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
+	long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
+
+	bitmap_fill(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
+	if (!do_swap_account)
+		clear_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
+	mem_cgroup_read_stat_vec(memcg, stat_bitmask, stat_vec);
 
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
-		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
-			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+		seq_printf(m, "%s %lld\n", mem_cgroup_stat_names[i],
+			   stat_vec[i] * PAGE_SIZE);
 	}
 
 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
@@ -5375,14 +5407,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 				   memsw_limit);
 	}
 
+	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++)
+		stat_vec[i] = 0;
+	mem_cgroup_recursive_stat(memcg, stat_bitmask, stat_vec);
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		long long val = 0;
-
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
-		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+
+		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i],
+				stat_vec[i] * PAGE_SIZE);
 	}
 
 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
-- 
1.9.0.GIT


* Re: [PATCH] mm/memcontrol.c: make mem_cgroup_read_stat() read all stat items of interest in one go
  2014-04-10 17:11 ` Jianyu Zhan
@ 2014-04-15  1:52   ` Johannes Weiner
From: Johannes Weiner @ 2014-04-15  1:52 UTC (permalink / raw)
  To: Jianyu Zhan
  Cc: mhocko, bsingharora, kamezawa.hiroyu, akpm, cgroups, linux-mm,
	linux-kernel

Hi Jianyu,

On Fri, Apr 11, 2014 at 01:11:08AM +0800, Jianyu Zhan wrote:
> Currently, mem_cgroup_read_stat() is used for user interface. The
> user accounts memory usage by memory cgroup and he _always_ requires
> exact value because he accounts memory. So we don't use quick-and-fuzzy
> -read-and-do-periodic-synchronization way. Thus, we iterate all cpus
> for one read.
> 
> And we mem_cgroup_usage() and mem_cgroup_recursive_stat() both finally
> call into mem_cgroup_read_stat().
> 
> However, these *stat snapshot* operations are implemented in a quite
> coarse way: it takes M*N iteration for each stat item(M=nr_memcgs,
> N=nr_possible_cpus). There are two deficiencies:
> 
> 1. for every stat item, we have to iterate over all percpu value, which
>    is not so cache friendly.
> 2. for every stat item, we call mem_cgroup_read_stat() once, which
>    increase the probablity of contending on pcp_counter_lock.
> 
> So, this patch improve this a bit. Concretely, for all interested stat
> items, mark them in a bitmap, and then make mem_cgroup_read_stat() read
> them all in one go.
> 
> This is more efficient, and to some degree make it more like *stat snapshot*.
> 
> Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
> ---
>  mm/memcontrol.c | 91 +++++++++++++++++++++++++++++++++++++++------------------
>  1 file changed, 62 insertions(+), 29 deletions(-)

This runs when the user reads statistics or when an OOM happens, neither
of which I would consider a fast path.  I don't think it's worth the extra
code, which looks more cumbersome than what we have.

* Re: [PATCH] mm/memcontrol.c: make mem_cgroup_read_stat() read all stat items of interest in one go
  2014-04-10 17:11 ` Jianyu Zhan
@ 2014-04-16  9:22   ` Michal Hocko
From: Michal Hocko @ 2014-04-16  9:22 UTC (permalink / raw)
  To: Jianyu Zhan
  Cc: hannes, bsingharora, kamezawa.hiroyu, akpm, cgroups, linux-mm,
	linux-kernel

On Fri 11-04-14 01:11:08, Jianyu Zhan wrote:
> Currently, mem_cgroup_read_stat() is used for user interface. The
> user accounts memory usage by memory cgroup and he _always_ requires
> exact value because he accounts memory. So we don't use quick-and-fuzzy
> -read-and-do-periodic-synchronization way. Thus, we iterate all cpus
> for one read.
> 
> And we mem_cgroup_usage() and mem_cgroup_recursive_stat() both finally
> call into mem_cgroup_read_stat().
> 
> However, these *stat snapshot* operations are implemented in a quite
> coarse way: it takes M*N iteration for each stat item(M=nr_memcgs,
> N=nr_possible_cpus). There are two deficiencies:
> 
> 1. for every stat item, we have to iterate over all percpu value, which
>    is not so cache friendly.
> 2. for every stat item, we call mem_cgroup_read_stat() once, which
>    increase the probablity of contending on pcp_counter_lock.
> 
> So, this patch improve this a bit.

How much and under what kind of load?

> Concretely, for all interested stat
> items, mark them in a bitmap, and then make mem_cgroup_read_stat() read
> them all in one go.
> 
> This is more efficient, and to some degree make it more like *stat snapshot*.
> 
> Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
> ---
>  mm/memcontrol.c | 91 +++++++++++++++++++++++++++++++++++++++------------------
>  1 file changed, 62 insertions(+), 29 deletions(-)

I cannot say I like the new code much more than the previous one, and
I've never seen the old one be a bottleneck. So I am not entirely
fond of optimization without a good reason. (Hint: if you are optimizing
something, always show us numbers that support the optimization.)

> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 29501f0..009357e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -347,7 +347,7 @@ struct mem_cgroup {
>  	struct mem_cgroup_stat_cpu __percpu *stat;
>  	/*
>  	 * used when a cpu is offlined or other synchronizations
> -	 * See mem_cgroup_read_stat().
> +	 * See mem_cgroup_read_stat_vec().
>  	 */
>  	struct mem_cgroup_stat_cpu nocpu_base;
>  	spinlock_t pcp_counter_lock;
> @@ -855,7 +855,13 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
>  	return mz;
>  }
>  
> -/*
> +/**
> + * @memcg: the mem_cgroup to account for.
> + * @stat_bitmask: a bitmap record which stat items to read,
> + *		each mem_cgroup_stat_index has its corresponding bit.
> + * @stat_vec: a stat vector to hold the stat value for returing, caller
> + *		shall take care of initializing it.
> + *
>   * Implementation Note: reading percpu statistics for memcg.
>   *
>   * Both of vmstat[] and percpu_counter has threshold and do periodic
> @@ -874,22 +880,25 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
>   * common workload, threashold and synchonization as vmstat[] should be
>   * implemented.
>   */
> -static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
> -				 enum mem_cgroup_stat_index idx)
> +static void mem_cgroup_read_stat_vec(struct mem_cgroup *memcg,
> +				 unsigned long *stat_bitmask,
> +				 long long *stat_vec)
>  {
> -	long val = 0;
>  	int cpu;
> +	int i;
>  
>  	get_online_cpus();
>  	for_each_online_cpu(cpu)
> -		val += per_cpu(memcg->stat->count[idx], cpu);
> +		for_each_set_bit(i, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
> +			stat_vec[i] += per_cpu(memcg->stat->count[i], cpu);
> +
>  #ifdef CONFIG_HOTPLUG_CPU
>  	spin_lock(&memcg->pcp_counter_lock);
> -	val += memcg->nocpu_base.count[idx];
> +	for_each_set_bit(i, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
> +		stat_vec[i] += memcg->nocpu_base.count[i];
>  	spin_unlock(&memcg->pcp_counter_lock);
>  #endif
>  	put_online_cpus();
> -	return val;
>  }
>  
>  static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
> @@ -1674,6 +1683,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
>  	static DEFINE_MUTEX(oom_info_lock);
>  	struct mem_cgroup *iter;
>  	unsigned int i;
> +	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
>  
>  	if (!p)
>  		return;
> @@ -1702,16 +1712,22 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
>  		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
>  		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
>  
> +	bitmap_fill(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
> +	if (!do_swap_account)
> +		clear_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
>  	for_each_mem_cgroup_tree(iter, memcg) {
> +		long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
> +
>  		pr_info("Memory cgroup stats for ");
>  		pr_cont_cgroup_path(iter->css.cgroup);
>  		pr_cont(":");
>  
> +		mem_cgroup_read_stat_vec(iter, stat_bitmask, stat_vec);
>  		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
>  			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
>  				continue;
> -			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
> -				K(mem_cgroup_read_stat(iter, i)));
> +			pr_cont(" %s:%lldKB", mem_cgroup_stat_names[i],
> +				K(stat_vec[i]));
>  		}
>  
>  		for (i = 0; i < NR_LRU_LISTS; i++)
> @@ -4940,25 +4956,28 @@ out:
>  	return retval;
>  }
>  
> -
> -static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
> -					       enum mem_cgroup_stat_index idx)
> +/* Callers should take care of initialize stat_vec array */
> +static void mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
> +					unsigned long *stat_bitmask,
> +					long long *stat_vec)
>  {
>  	struct mem_cgroup *iter;
> -	long val = 0;
> +	int idx;
>  
>  	/* Per-cpu values can be negative, use a signed accumulator */
>  	for_each_mem_cgroup_tree(iter, memcg)
> -		val += mem_cgroup_read_stat(iter, idx);
> +		mem_cgroup_read_stat_vec(iter, stat_bitmask, stat_vec);
>  
> -	if (val < 0) /* race ? */
> -		val = 0;
> -	return val;
> +	for_each_set_bit(idx, stat_bitmask, MEM_CGROUP_STAT_NSTATS)
> +		if (stat_vec[idx] < 0) /* race ? */
> +			stat_vec[idx] = 0;
>  }
>  
>  static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
>  {
>  	u64 val;
> +	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
> +	long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
>  
>  	if (!mem_cgroup_is_root(memcg)) {
>  		if (!swap)
> @@ -4967,15 +4986,21 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
>  			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
>  	}
>  
> +
>  	/*
>  	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
>  	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
>  	 */
> -	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
> -	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
> -
> +	bitmap_zero(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
> +	set_bit(MEM_CGROUP_STAT_CACHE, stat_bitmask);
> +	set_bit(MEM_CGROUP_STAT_RSS, stat_bitmask);
>  	if (swap)
> -		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
> +		set_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
> +
> +	mem_cgroup_recursive_stat(memcg, stat_bitmask, stat_vec);
> +
> +	val = stat_vec[MEM_CGROUP_STAT_CACHE] + stat_vec[MEM_CGROUP_STAT_RSS] +
> +	      (swap ? stat_vec[MEM_CGROUP_STAT_SWAP] : 0);
>  
>  	return val << PAGE_SHIFT;
>  }
> @@ -5349,12 +5374,19 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
>  	struct mem_cgroup *mi;
>  	unsigned int i;
> +	DECLARE_BITMAP(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
> +	long long stat_vec[MEM_CGROUP_STAT_NSTATS] = {0};
> +
> +	bitmap_fill(stat_bitmask, MEM_CGROUP_STAT_NSTATS);
> +	if (!do_swap_account)
> +		clear_bit(MEM_CGROUP_STAT_SWAP, stat_bitmask);
> +	mem_cgroup_read_stat_vec(memcg, stat_bitmask, stat_vec);
>  
>  	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
>  		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
>  			continue;
> -		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
> -			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
> +		seq_printf(m, "%s %lld\n", mem_cgroup_stat_names[i],
> +			   stat_vec[i] * PAGE_SIZE);
>  	}
>  
>  	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
> @@ -5375,14 +5407,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  				   memsw_limit);
>  	}
>  
> +	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++)
> +		stat_vec[i] = 0;
> +	mem_cgroup_recursive_stat(memcg, stat_bitmask, stat_vec);
>  	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
> -		long long val = 0;
> -
>  		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
>  			continue;
> -		for_each_mem_cgroup_tree(mi, memcg)
> -			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
> -		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
> +
> +		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i],
> +				stat_vec[i] * PAGE_SIZE);
>  	}
>  
>  	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
> -- 
> 1.9.0.GIT
> 

-- 
Michal Hocko
SUSE Labs

* Re: [PATCH] mm/memcontrol.c: make mem_cgroup_read_stat() read all stat items of interest in one go
  2014-04-16  9:22   ` Michal Hocko
@ 2014-04-16 15:22     ` Zhan Jianyu
From: Zhan Jianyu @ 2014-04-16 15:22 UTC (permalink / raw)
  To: Michal Hocko
  Cc: Johannes Weiner, Balbir Singh, kamezawa.hiroyu, Andrew Morton,
	cgroups, linux-mm, LKML

On Wed, Apr 16, 2014 at 5:22 PM, Michal Hocko <mhocko@suse.cz> wrote:
> cannot say I like the new code much more than the previous one and
> I've never seen the old one being a bottleneck. So I am not entirely
> fond of optimization without a good reason. (Hint, if you are optimizing
> something always show us numbers which support the optimization)

Hmm, I actually have no workload at the moment that supports this
optimization; I will refine the patch once I have one.  Thanks for all your comments.

Thanks,
Jianyu Zhan

