* [patch 1/3] mm: memcontrol: remove unnecessary soft limit tree node test
From: Johannes Weiner @ 2015-01-10  2:13 UTC
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel

kzalloc_node() automatically falls back to a node with suitable memory
when the requested node is memoryless, so the explicit N_NORMAL_MEMORY
test before the allocation is unnecessary.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/memcontrol.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fd9e542fc26f..aad254b30708 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4520,13 +4520,10 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
 {
 	struct mem_cgroup_tree_per_node *rtpn;
 	struct mem_cgroup_tree_per_zone *rtpz;
-	int tmp, node, zone;
+	int node, zone;
 
 	for_each_node(node) {
-		tmp = node;
-		if (!node_state(node, N_NORMAL_MEMORY))
-			tmp = -1;
-		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
 		BUG_ON(!rtpn);
 
 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
-- 
2.2.0
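
For readers less familiar with the NUMA allocator, a minimal
before/after sketch of the pattern this patch removes (illustrative
kernel-style C; the helper names are invented for the example and are
not part of the patch):

	#include <linux/nodemask.h>
	#include <linux/slab.h>

	static void *alloc_on_node_old(size_t size, int node)
	{
		/*
		 * Old workaround: redirect memoryless nodes to "no
		 * preference" (spelled as a literal -1 in the old code).
		 */
		if (!node_state(node, N_NORMAL_MEMORY))
			node = NUMA_NO_NODE;
		return kzalloc_node(size, GFP_KERNEL, node);
	}

	static void *alloc_on_node_new(size_t size, int node)
	{
		/*
		 * kzalloc_node() allocates from the node's zonelist, so a
		 * memoryless node transparently falls back to the nearest
		 * node with suitable memory -- no explicit check needed.
		 */
		return kzalloc_node(size, GFP_KERNEL, node);
	}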


* [patch 2/3] mm: memcontrol: consolidate memory controller initialization
From: Johannes Weiner @ 2015-01-10  2:14 UTC
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel

The initialization code for the per-cpu charge stock and the soft
limit tree is compact enough to inline it into mem_cgroup_init().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/memcontrol.c | 57 ++++++++++++++++++++++++---------------------------------
 1 file changed, 24 insertions(+), 33 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index aad254b30708..f66bb8f83ac9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2137,17 +2137,6 @@ static void drain_local_stock(struct work_struct *dummy)
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
-static void __init memcg_stock_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock =
-					&per_cpu(memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-	}
-}
-
 /*
  * Cache charges(val) to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
@@ -4516,26 +4505,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static void __init mem_cgroup_soft_limit_tree_init(void)
-{
-	struct mem_cgroup_tree_per_node *rtpn;
-	struct mem_cgroup_tree_per_zone *rtpz;
-	int node, zone;
-
-	for_each_node(node) {
-		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
-		BUG_ON(!rtpn);
-
-		soft_limit_tree.rb_tree_per_node[node] = rtpn;
-
-		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			rtpz = &rtpn->rb_tree_per_zone[zone];
-			rtpz->rb_root = RB_ROOT;
-			spin_lock_init(&rtpz->lock);
-		}
-	}
-}
-
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -5927,10 +5896,32 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
  */
 static int __init mem_cgroup_init(void)
 {
+	int cpu, nid;
+
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
+
+	for_each_node(nid) {
+		struct mem_cgroup_tree_per_node *rtpn;
+		int zone;
+
+		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, nid);
+
+		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+			struct mem_cgroup_tree_per_zone *rtpz;
+
+			rtpz = &rtpn->rb_tree_per_zone[zone];
+			rtpz->rb_root = RB_ROOT;
+			spin_lock_init(&rtpz->lock);
+		}
+		soft_limit_tree.rb_tree_per_node[nid] = rtpn;
+	}
+
 	enable_swap_cgroup();
-	mem_cgroup_soft_limit_tree_init();
-	memcg_stock_init();
+
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);
-- 
2.2.0
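
As background for the subsys_initcall() above (not part of the patch):
an initcall registered this way runs exactly once during boot, which is
what makes it safe to fold the one-time setup helpers directly into
mem_cgroup_init(). A minimal sketch of the pattern, with an invented
function name:

	#include <linux/init.h>

	static int __init my_subsys_init(void)
	{
		/* One-time boot setup: per-cpu state, lookup trees, ... */
		return 0;	/* a nonzero return is logged as an initcall error */
	}
	subsys_initcall(my_subsys_init);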


* [patch 3/3] mm: memcontrol: consolidate swap controller code
From: Johannes Weiner @ 2015-01-10  2:14 UTC
  To: Andrew Morton
  Cc: Michal Hocko, Vladimir Davydov, linux-mm, cgroups, linux-kernel

The swap controller code is scattered all over the file.  Gather all
the code that isn't directly needed by the memory controller into its
own CONFIG_MEMCG_SWAP section at the end of the file.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/memcontrol.c | 264 +++++++++++++++++++++++++++-----------------------------
 1 file changed, 125 insertions(+), 139 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f66bb8f83ac9..5a5769e8b12c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -72,22 +72,13 @@ EXPORT_SYMBOL(memory_cgrp_subsys);
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 static struct mem_cgroup *root_mem_cgroup __read_mostly;
 
+/* Whether the swap controller is active */
 #ifdef CONFIG_MEMCG_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
 int do_swap_account __read_mostly;
-
-/* for remember boot option*/
-#ifdef CONFIG_MEMCG_SWAP_ENABLED
-static int really_do_swap_account __initdata = 1;
-#else
-static int really_do_swap_account __initdata;
-#endif
-
 #else
 #define do_swap_account		0
 #endif
 
-
 static const char * const mem_cgroup_stat_names[] = {
 	"cache",
 	"rss",
@@ -4382,34 +4373,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
 	{ },	/* terminate */
 };
 
-#ifdef CONFIG_MEMCG_SWAP
-static struct cftype memsw_cgroup_files[] = {
-	{
-		.name = "memsw.usage_in_bytes",
-		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
-		.read_u64 = mem_cgroup_read_u64,
-	},
-	{
-		.name = "memsw.max_usage_in_bytes",
-		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
-		.write = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read_u64,
-	},
-	{
-		.name = "memsw.limit_in_bytes",
-		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
-		.write = mem_cgroup_write,
-		.read_u64 = mem_cgroup_read_u64,
-	},
-	{
-		.name = "memsw.failcnt",
-		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
-		.write = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read_u64,
-	},
-	{ },	/* terminate */
-};
-#endif
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
 	struct mem_cgroup_per_node *pn;
@@ -5415,37 +5378,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.early_init = 0,
 };
 
-#ifdef CONFIG_MEMCG_SWAP
-static int __init enable_swap_account(char *s)
-{
-	if (!strcmp(s, "1"))
-		really_do_swap_account = 1;
-	else if (!strcmp(s, "0"))
-		really_do_swap_account = 0;
-	return 1;
-}
-__setup("swapaccount=", enable_swap_account);
-
-static void __init memsw_file_init(void)
-{
-	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
-					  memsw_cgroup_files));
-}
-
-static void __init enable_swap_cgroup(void)
-{
-	if (!mem_cgroup_disabled() && really_do_swap_account) {
-		do_swap_account = 1;
-		memsw_file_init();
-	}
-}
-
-#else
-static void __init enable_swap_cgroup(void)
-{
-}
-#endif
-
 /**
  * mem_cgroup_events - count memory events against a cgroup
  * @memcg: the memory cgroup
@@ -5496,74 +5428,6 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
 	return true;
 }
 
-#ifdef CONFIG_MEMCG_SWAP
-/**
- * mem_cgroup_swapout - transfer a memsw charge to swap
- * @page: page whose memsw charge to transfer
- * @entry: swap entry to move the charge to
- *
- * Transfer the memsw charge of @page to @entry.
- */
-void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
-{
-	struct mem_cgroup *memcg;
-	unsigned short oldid;
-
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-	VM_BUG_ON_PAGE(page_count(page), page);
-
-	if (!do_swap_account)
-		return;
-
-	memcg = page->mem_cgroup;
-
-	/* Readahead page, never charged */
-	if (!memcg)
-		return;
-
-	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
-	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(memcg, true);
-
-	page->mem_cgroup = NULL;
-
-	if (!mem_cgroup_is_root(memcg))
-		page_counter_uncharge(&memcg->memory, 1);
-
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
-
-	mem_cgroup_charge_statistics(memcg, page, -1);
-	memcg_check_events(memcg, page);
-}
-
-/**
- * mem_cgroup_uncharge_swap - uncharge a swap entry
- * @entry: swap entry to uncharge
- *
- * Drop the memsw charge associated with @entry.
- */
-void mem_cgroup_uncharge_swap(swp_entry_t entry)
-{
-	struct mem_cgroup *memcg;
-	unsigned short id;
-
-	if (!do_swap_account)
-		return;
-
-	id = swap_cgroup_record(entry, 0);
-	rcu_read_lock();
-	memcg = mem_cgroup_lookup(id);
-	if (memcg) {
-		if (!mem_cgroup_is_root(memcg))
-			page_counter_uncharge(&memcg->memsw, 1);
-		mem_cgroup_swap_statistics(memcg, false);
-		css_put(&memcg->css);
-	}
-	rcu_read_unlock();
-}
-#endif
-
 /**
  * mem_cgroup_try_charge - try charging a page
  * @page: page to charge
@@ -5920,8 +5784,130 @@ static int __init mem_cgroup_init(void)
 		soft_limit_tree.rb_tree_per_node[nid] = rtpn;
 	}
 
-	enable_swap_cgroup();
-
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);
+
+#ifdef CONFIG_MEMCG_SWAP
+/**
+ * mem_cgroup_swapout - transfer a memsw charge to swap
+ * @page: page whose memsw charge to transfer
+ * @entry: swap entry to move the charge to
+ *
+ * Transfer the memsw charge of @page to @entry.
+ */
+void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+{
+	struct mem_cgroup *memcg;
+	unsigned short oldid;
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_PAGE(page_count(page), page);
+
+	if (!do_swap_account)
+		return;
+
+	memcg = page->mem_cgroup;
+
+	/* Readahead page, never charged */
+	if (!memcg)
+		return;
+
+	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+	VM_BUG_ON_PAGE(oldid, page);
+	mem_cgroup_swap_statistics(memcg, true);
+
+	page->mem_cgroup = NULL;
+
+	if (!mem_cgroup_is_root(memcg))
+		page_counter_uncharge(&memcg->memory, 1);
+
+	/* XXX: caller holds IRQ-safe mapping->tree_lock */
+	VM_BUG_ON(!irqs_disabled());
+
+	mem_cgroup_charge_statistics(memcg, page, -1);
+	memcg_check_events(memcg, page);
+}
+
+/**
+ * mem_cgroup_uncharge_swap - uncharge a swap entry
+ * @entry: swap entry to uncharge
+ *
+ * Drop the memsw charge associated with @entry.
+ */
+void mem_cgroup_uncharge_swap(swp_entry_t entry)
+{
+	struct mem_cgroup *memcg;
+	unsigned short id;
+
+	if (!do_swap_account)
+		return;
+
+	id = swap_cgroup_record(entry, 0);
+	rcu_read_lock();
+	memcg = mem_cgroup_lookup(id);
+	if (memcg) {
+		if (!mem_cgroup_is_root(memcg))
+			page_counter_uncharge(&memcg->memsw, 1);
+		mem_cgroup_swap_statistics(memcg, false);
+		css_put(&memcg->css);
+	}
+	rcu_read_unlock();
+}
+
+/* Remember the swapaccount= boot option */
+#ifdef CONFIG_MEMCG_SWAP_ENABLED
+static int really_do_swap_account __initdata = 1;
+#else
+static int really_do_swap_account __initdata;
+#endif
+
+static int __init enable_swap_account(char *s)
+{
+	if (!strcmp(s, "1"))
+		really_do_swap_account = 1;
+	else if (!strcmp(s, "0"))
+		really_do_swap_account = 0;
+	return 1;
+}
+__setup("swapaccount=", enable_swap_account);
+
+static struct cftype memsw_cgroup_files[] = {
+	{
+		.name = "memsw.usage_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
+		.read_u64 = mem_cgroup_read_u64,
+	},
+	{
+		.name = "memsw.max_usage_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
+		.write = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read_u64,
+	},
+	{
+		.name = "memsw.limit_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
+		.write = mem_cgroup_write,
+		.read_u64 = mem_cgroup_read_u64,
+	},
+	{
+		.name = "memsw.failcnt",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
+		.write = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read_u64,
+	},
+	{ },	/* terminate */
+};
+
+static int __init mem_cgroup_swap_init(void)
+{
+	if (!mem_cgroup_disabled() && really_do_swap_account) {
+		do_swap_account = 1;
+		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
+						  memsw_cgroup_files));
+	}
+	return 0;
+}
+subsys_initcall(mem_cgroup_swap_init);
+
+#endif /* CONFIG_MEMCG_SWAP */
-- 
2.2.0
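
To summarize the resulting layout of mm/memcontrol.c (a sketch
assembled from the hunks above; booting with swapaccount=0 or
swapaccount=1 still overrides the CONFIG_MEMCG_SWAP_ENABLED default):

	/*
	 * mm/memcontrol.c after this patch:
	 *
	 *   ... memory controller proper ...
	 *   mem_cgroup_init()            subsys_initcall(mem_cgroup_init)
	 *
	 * #ifdef CONFIG_MEMCG_SWAP
	 *   mem_cgroup_swapout()         transfer a memsw charge to swap
	 *   mem_cgroup_uncharge_swap()   drop the memsw charge of an entry
	 *   really_do_swap_account       CONFIG_MEMCG_SWAP_ENABLED default
	 *   enable_swap_account()        swapaccount= boot option parser
	 *   memsw_cgroup_files[]         legacy memsw.* control files
	 *   mem_cgroup_swap_init()       subsys_initcall(mem_cgroup_swap_init)
	 * #endif
	 */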


* Re: [patch 1/3] mm: memcontrol: remove unnecessary soft limit tree node test
From: Vladimir Davydov @ 2015-01-12 11:11 UTC
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel

On Fri, Jan 09, 2015 at 09:13:59PM -0500, Johannes Weiner wrote:
> kzalloc_node() automatically falls back to nodes with suitable memory.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>

* Re: [patch 2/3] mm: memcontrol: consolidate memory controller initialization
From: Vladimir Davydov @ 2015-01-12 11:14 UTC
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel

On Fri, Jan 09, 2015 at 09:14:00PM -0500, Johannes Weiner wrote:
> The initialization code for the per-cpu charge stock and the soft
> limit tree is compact enough to inline it into mem_cgroup_init().
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
>  mm/memcontrol.c | 57 ++++++++++++++++++++++++---------------------------------
>  1 file changed, 24 insertions(+), 33 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index aad254b30708..f66bb8f83ac9 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
[...]
> @@ -5927,10 +5896,32 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
>   */
>  static int __init mem_cgroup_init(void)
>  {
> +	int cpu, nid;
> +
>  	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
> +
> +	for_each_possible_cpu(cpu)
> +		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
> +			  drain_local_stock);
> +
> +	for_each_node(nid) {
> +		struct mem_cgroup_tree_per_node *rtpn;
> +		int zone;
> +
> +		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, nid);

I'd like to see a BUG_ON(!rtpn) here, just for clarity. Not critical,
though.
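
For illustration, the suggested addition would be (a sketch, not a
tested patch):

	rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, nid);
	/* Boot-time allocation; there is no reasonable recovery path. */
	BUG_ON(!rtpn);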

Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>

> +
> +		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
> +			struct mem_cgroup_tree_per_zone *rtpz;
> +
> +			rtpz = &rtpn->rb_tree_per_zone[zone];
> +			rtpz->rb_root = RB_ROOT;
> +			spin_lock_init(&rtpz->lock);
> +		}
> +		soft_limit_tree.rb_tree_per_node[nid] = rtpn;
> +	}
> +
>  	enable_swap_cgroup();
> -	mem_cgroup_soft_limit_tree_init();
> -	memcg_stock_init();
> +
>  	return 0;
>  }
>  subsys_initcall(mem_cgroup_init);
> -- 
> 2.2.0
> 

* Re: [patch 3/3] mm: memcontrol: consolidate swap controller code
From: Vladimir Davydov @ 2015-01-12 11:22 UTC
  To: Johannes Weiner
  Cc: Andrew Morton, Michal Hocko, linux-mm, cgroups, linux-kernel

On Fri, Jan 09, 2015 at 09:14:01PM -0500, Johannes Weiner wrote:
> The swap controller code is scattered all over the file.  Gather all
> the code that isn't directly needed by the memory controller at the
> end of the file in its own CONFIG_MEMCG_SWAP section.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

I've always wondered why it had to be scattered all over the place. I
guess we'll have to do the same for the kmem part.

Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>

* Re: [patch 1/3] mm: memcontrol: remove unnecessary soft limit tree node test
  2015-01-10  2:13 ` Johannes Weiner
@ 2015-01-13 14:50   ` Michal Hocko
  0 siblings, 0 replies; 25+ messages in thread
From: Michal Hocko @ 2015-01-13 14:50 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel

On Fri 09-01-15 21:13:59, Johannes Weiner wrote:
> kzalloc_node() automatically falls back to nodes with suitable memory.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.cz>

> ---
>  mm/memcontrol.c | 7 ++-----
>  1 file changed, 2 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index fd9e542fc26f..aad254b30708 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -4520,13 +4520,10 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
>  {
>  	struct mem_cgroup_tree_per_node *rtpn;
>  	struct mem_cgroup_tree_per_zone *rtpz;
> -	int tmp, node, zone;
> +	int node, zone;
>  
>  	for_each_node(node) {
> -		tmp = node;
> -		if (!node_state(node, N_NORMAL_MEMORY))
> -			tmp = -1;
> -		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
> +		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
>  		BUG_ON(!rtpn);
>  
>  		soft_limit_tree.rb_tree_per_node[node] = rtpn;
> -- 
> 2.2.0
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [patch 2/3] mm: memcontrol: consolidate memory controller initialization
  2015-01-10  2:14   ` Johannes Weiner
@ 2015-01-13 14:53     ` Michal Hocko
  0 siblings, 0 replies; 25+ messages in thread
From: Michal Hocko @ 2015-01-13 14:53 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel

On Fri 09-01-15 21:14:00, Johannes Weiner wrote:
> The initialization code for the per-cpu charge stock and the soft
> limit tree is compact enough to inline it into mem_cgroup_init().
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.cz>

> ---
>  mm/memcontrol.c | 57 ++++++++++++++++++++++++---------------------------------
>  1 file changed, 24 insertions(+), 33 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index aad254b30708..f66bb8f83ac9 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2137,17 +2137,6 @@ static void drain_local_stock(struct work_struct *dummy)
>  	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
>  }
>  
> -static void __init memcg_stock_init(void)
> -{
> -	int cpu;
> -
> -	for_each_possible_cpu(cpu) {
> -		struct memcg_stock_pcp *stock =
> -					&per_cpu(memcg_stock, cpu);
> -		INIT_WORK(&stock->work, drain_local_stock);
> -	}
> -}
> -
>  /*
>   * Cache charges(val) to local per_cpu area.
>   * This will be consumed by consume_stock() function, later.
> @@ -4516,26 +4505,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
>  }
>  EXPORT_SYMBOL(parent_mem_cgroup);
>  
> -static void __init mem_cgroup_soft_limit_tree_init(void)
> -{
> -	struct mem_cgroup_tree_per_node *rtpn;
> -	struct mem_cgroup_tree_per_zone *rtpz;
> -	int node, zone;
> -
> -	for_each_node(node) {
> -		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
> -		BUG_ON(!rtpn);
> -
> -		soft_limit_tree.rb_tree_per_node[node] = rtpn;
> -
> -		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
> -			rtpz = &rtpn->rb_tree_per_zone[zone];
> -			rtpz->rb_root = RB_ROOT;
> -			spin_lock_init(&rtpz->lock);
> -		}
> -	}
> -}
> -
>  static struct cgroup_subsys_state * __ref
>  mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
>  {
> @@ -5927,10 +5896,32 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
>   */
>  static int __init mem_cgroup_init(void)
>  {
> +	int cpu, nid;
> +
>  	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
> +
> +	for_each_possible_cpu(cpu)
> +		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
> +			  drain_local_stock);
> +
> +	for_each_node(nid) {
> +		struct mem_cgroup_tree_per_node *rtpn;
> +		int zone;
> +
> +		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, nid);
> +
> +		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
> +			struct mem_cgroup_tree_per_zone *rtpz;
> +
> +			rtpz = &rtpn->rb_tree_per_zone[zone];
> +			rtpz->rb_root = RB_ROOT;
> +			spin_lock_init(&rtpz->lock);
> +		}
> +		soft_limit_tree.rb_tree_per_node[nid] = rtpn;
> +	}
> +
>  	enable_swap_cgroup();
> -	mem_cgroup_soft_limit_tree_init();
> -	memcg_stock_init();
> +
>  	return 0;
>  }
>  subsys_initcall(mem_cgroup_init);
> -- 
> 2.2.0
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [patch 3/3] mm: memcontrol: consolidate swap controller code
  2015-01-10  2:14   ` Johannes Weiner
@ 2015-01-13 15:01     ` Michal Hocko
  0 siblings, 0 replies; 25+ messages in thread
From: Michal Hocko @ 2015-01-13 15:01 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: Andrew Morton, Vladimir Davydov, linux-mm, cgroups, linux-kernel

On Fri 09-01-15 21:14:01, Johannes Weiner wrote:
> The swap controller code is scattered all over the file.  Gather all
> the code that isn't directly needed by the memory controller at the
> end of the file in its own CONFIG_MEMCG_SWAP section.

Well, the idea was to keep each piece next to the corresponding
infrastructure, I guess: memsw_cgroup_files were together with
mem_cgroup_files, and the swap accounting with the charge routines.
Putting everything together is certainly an option as well. I do not
feel strongly either way; I tend to dislike code churn, but if it
makes further changes easier then there are definitely no objections
from me.

> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
>  mm/memcontrol.c | 264 +++++++++++++++++++++++++++-----------------------------
>  1 file changed, 125 insertions(+), 139 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index f66bb8f83ac9..5a5769e8b12c 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -72,22 +72,13 @@ EXPORT_SYMBOL(memory_cgrp_subsys);
>  #define MEM_CGROUP_RECLAIM_RETRIES	5
>  static struct mem_cgroup *root_mem_cgroup __read_mostly;
>  
> +/* Whether the swap controller is active */
>  #ifdef CONFIG_MEMCG_SWAP
> -/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
>  int do_swap_account __read_mostly;
> -
> -/* for remember boot option*/
> -#ifdef CONFIG_MEMCG_SWAP_ENABLED
> -static int really_do_swap_account __initdata = 1;
> -#else
> -static int really_do_swap_account __initdata;
> -#endif
> -
>  #else
>  #define do_swap_account		0
>  #endif
>  
> -
>  static const char * const mem_cgroup_stat_names[] = {
>  	"cache",
>  	"rss",
> @@ -4382,34 +4373,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
>  	{ },	/* terminate */
>  };
>  
> -#ifdef CONFIG_MEMCG_SWAP
> -static struct cftype memsw_cgroup_files[] = {
> -	{
> -		.name = "memsw.usage_in_bytes",
> -		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
> -		.read_u64 = mem_cgroup_read_u64,
> -	},
> -	{
> -		.name = "memsw.max_usage_in_bytes",
> -		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
> -		.write = mem_cgroup_reset,
> -		.read_u64 = mem_cgroup_read_u64,
> -	},
> -	{
> -		.name = "memsw.limit_in_bytes",
> -		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
> -		.write = mem_cgroup_write,
> -		.read_u64 = mem_cgroup_read_u64,
> -	},
> -	{
> -		.name = "memsw.failcnt",
> -		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
> -		.write = mem_cgroup_reset,
> -		.read_u64 = mem_cgroup_read_u64,
> -	},
> -	{ },	/* terminate */
> -};
> -#endif
>  static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
>  {
>  	struct mem_cgroup_per_node *pn;
> @@ -5415,37 +5378,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
>  	.early_init = 0,
>  };
>  
> -#ifdef CONFIG_MEMCG_SWAP
> -static int __init enable_swap_account(char *s)
> -{
> -	if (!strcmp(s, "1"))
> -		really_do_swap_account = 1;
> -	else if (!strcmp(s, "0"))
> -		really_do_swap_account = 0;
> -	return 1;
> -}
> -__setup("swapaccount=", enable_swap_account);
> -
> -static void __init memsw_file_init(void)
> -{
> -	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
> -					  memsw_cgroup_files));
> -}
> -
> -static void __init enable_swap_cgroup(void)
> -{
> -	if (!mem_cgroup_disabled() && really_do_swap_account) {
> -		do_swap_account = 1;
> -		memsw_file_init();
> -	}
> -}
> -
> -#else
> -static void __init enable_swap_cgroup(void)
> -{
> -}
> -#endif
> -
>  /**
>   * mem_cgroup_events - count memory events against a cgroup
>   * @memcg: the memory cgroup
> @@ -5496,74 +5428,6 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
>  	return true;
>  }
>  
> -#ifdef CONFIG_MEMCG_SWAP
> -/**
> - * mem_cgroup_swapout - transfer a memsw charge to swap
> - * @page: page whose memsw charge to transfer
> - * @entry: swap entry to move the charge to
> - *
> - * Transfer the memsw charge of @page to @entry.
> - */
> -void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
> -{
> -	struct mem_cgroup *memcg;
> -	unsigned short oldid;
> -
> -	VM_BUG_ON_PAGE(PageLRU(page), page);
> -	VM_BUG_ON_PAGE(page_count(page), page);
> -
> -	if (!do_swap_account)
> -		return;
> -
> -	memcg = page->mem_cgroup;
> -
> -	/* Readahead page, never charged */
> -	if (!memcg)
> -		return;
> -
> -	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
> -	VM_BUG_ON_PAGE(oldid, page);
> -	mem_cgroup_swap_statistics(memcg, true);
> -
> -	page->mem_cgroup = NULL;
> -
> -	if (!mem_cgroup_is_root(memcg))
> -		page_counter_uncharge(&memcg->memory, 1);
> -
> -	/* XXX: caller holds IRQ-safe mapping->tree_lock */
> -	VM_BUG_ON(!irqs_disabled());
> -
> -	mem_cgroup_charge_statistics(memcg, page, -1);
> -	memcg_check_events(memcg, page);
> -}
> -
> -/**
> - * mem_cgroup_uncharge_swap - uncharge a swap entry
> - * @entry: swap entry to uncharge
> - *
> - * Drop the memsw charge associated with @entry.
> - */
> -void mem_cgroup_uncharge_swap(swp_entry_t entry)
> -{
> -	struct mem_cgroup *memcg;
> -	unsigned short id;
> -
> -	if (!do_swap_account)
> -		return;
> -
> -	id = swap_cgroup_record(entry, 0);
> -	rcu_read_lock();
> -	memcg = mem_cgroup_lookup(id);
> -	if (memcg) {
> -		if (!mem_cgroup_is_root(memcg))
> -			page_counter_uncharge(&memcg->memsw, 1);
> -		mem_cgroup_swap_statistics(memcg, false);
> -		css_put(&memcg->css);
> -	}
> -	rcu_read_unlock();
> -}
> -#endif
> -
>  /**
>   * mem_cgroup_try_charge - try charging a page
>   * @page: page to charge
> @@ -5920,8 +5784,130 @@ static int __init mem_cgroup_init(void)
>  		soft_limit_tree.rb_tree_per_node[nid] = rtpn;
>  	}
>  
> -	enable_swap_cgroup();
> -
>  	return 0;
>  }
>  subsys_initcall(mem_cgroup_init);
> +
> +#ifdef CONFIG_MEMCG_SWAP
> +/**
> + * mem_cgroup_swapout - transfer a memsw charge to swap
> + * @page: page whose memsw charge to transfer
> + * @entry: swap entry to move the charge to
> + *
> + * Transfer the memsw charge of @page to @entry.
> + */
> +void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
> +{
> +	struct mem_cgroup *memcg;
> +	unsigned short oldid;
> +
> +	VM_BUG_ON_PAGE(PageLRU(page), page);
> +	VM_BUG_ON_PAGE(page_count(page), page);
> +
> +	if (!do_swap_account)
> +		return;
> +
> +	memcg = page->mem_cgroup;
> +
> +	/* Readahead page, never charged */
> +	if (!memcg)
> +		return;
> +
> +	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
> +	VM_BUG_ON_PAGE(oldid, page);
> +	mem_cgroup_swap_statistics(memcg, true);
> +
> +	page->mem_cgroup = NULL;
> +
> +	if (!mem_cgroup_is_root(memcg))
> +		page_counter_uncharge(&memcg->memory, 1);
> +
> +	/* XXX: caller holds IRQ-safe mapping->tree_lock */
> +	VM_BUG_ON(!irqs_disabled());
> +
> +	mem_cgroup_charge_statistics(memcg, page, -1);
> +	memcg_check_events(memcg, page);
> +}
> +
> +/**
> + * mem_cgroup_uncharge_swap - uncharge a swap entry
> + * @entry: swap entry to uncharge
> + *
> + * Drop the memsw charge associated with @entry.
> + */
> +void mem_cgroup_uncharge_swap(swp_entry_t entry)
> +{
> +	struct mem_cgroup *memcg;
> +	unsigned short id;
> +
> +	if (!do_swap_account)
> +		return;
> +
> +	id = swap_cgroup_record(entry, 0);
> +	rcu_read_lock();
> +	memcg = mem_cgroup_lookup(id);
> +	if (memcg) {
> +		if (!mem_cgroup_is_root(memcg))
> +			page_counter_uncharge(&memcg->memsw, 1);
> +		mem_cgroup_swap_statistics(memcg, false);
> +		css_put(&memcg->css);
> +	}
> +	rcu_read_unlock();
> +}
> +
> +/* for remember boot option*/
> +#ifdef CONFIG_MEMCG_SWAP_ENABLED
> +static int really_do_swap_account __initdata = 1;
> +#else
> +static int really_do_swap_account __initdata;
> +#endif
> +
> +static int __init enable_swap_account(char *s)
> +{
> +	if (!strcmp(s, "1"))
> +		really_do_swap_account = 1;
> +	else if (!strcmp(s, "0"))
> +		really_do_swap_account = 0;
> +	return 1;
> +}
> +__setup("swapaccount=", enable_swap_account);
> +
> +static struct cftype memsw_cgroup_files[] = {
> +	{
> +		.name = "memsw.usage_in_bytes",
> +		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
> +		.read_u64 = mem_cgroup_read_u64,
> +	},
> +	{
> +		.name = "memsw.max_usage_in_bytes",
> +		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
> +		.write = mem_cgroup_reset,
> +		.read_u64 = mem_cgroup_read_u64,
> +	},
> +	{
> +		.name = "memsw.limit_in_bytes",
> +		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
> +		.write = mem_cgroup_write,
> +		.read_u64 = mem_cgroup_read_u64,
> +	},
> +	{
> +		.name = "memsw.failcnt",
> +		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
> +		.write = mem_cgroup_reset,
> +		.read_u64 = mem_cgroup_read_u64,
> +	},
> +	{ },	/* terminate */
> +};
> +
> +static int __init mem_cgroup_swap_init(void)
> +{
> +	if (!mem_cgroup_disabled() && really_do_swap_account) {
> +		do_swap_account = 1;
> +		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
> +						  memsw_cgroup_files));
> +	}
> +	return 0;
> +}
> +subsys_initcall(mem_cgroup_swap_init);
> +
> +#endif /* CONFIG_MEMCG_SWAP */
> -- 
> 2.2.0
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2015-01-13 15:01 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-10  2:13 [patch 1/3] mm: memcontrol: remove unnecessary soft limit tree node test Johannes Weiner
2015-01-10  2:14 ` [patch 2/3] mm: memcontrol: consolidate memory controller initialization Johannes Weiner
2015-01-12 11:14   ` Vladimir Davydov
2015-01-13 14:53   ` Michal Hocko
2015-01-10  2:14 ` [patch 3/3] mm: memcontrol: consolidate swap controller code Johannes Weiner
2015-01-12 11:22   ` Vladimir Davydov
2015-01-13 15:01   ` Michal Hocko
2015-01-12 11:11 ` [patch 1/3] mm: memcontrol: remove unnecessary soft limit tree node test Vladimir Davydov
2015-01-13 14:50 ` Michal Hocko
