From: Glauber Costa
Subject: [PATCH v3 1/2] Always free struct memcg through schedule_work()
Date: Thu, 26 Apr 2012 18:24:22 -0300
Message-ID: <1335475463-25167-2-git-send-email-glommer@parallels.com>
In-Reply-To: <1335475463-25167-1-git-send-email-glommer@parallels.com>
References: <1335475463-25167-1-git-send-email-glommer@parallels.com>
To: cgroups@vger.kernel.org
Cc: netdev@vger.kernel.org, linux-mm@kvack.org, devel@openvz.org,
    kamezawa.hiroyu@jp.fujitsu.com, Li Zefan, Tejun Heo, Glauber Costa,
    Johannes Weiner, Michal Hocko

Right now we free struct memcg with kfree() right after an RCU grace
period, but we defer the free whenever we have to use vfree() to get rid
of that memory area.  We defer it out of necessity, because vfree() must
be called from process context.

This patch unifies the behavior by ensuring that even the kfree() case
happens in a separate worker thread.  The goal is to have a stable place
to call the upcoming jump label destruction function, outside the realm
of the complicated and quite far-reaching cgroup lock (which cannot be
held while taking either cpu_hotplug.lock or jump_label_mutex).

Signed-off-by: Glauber Costa
CC: Tejun Heo
CC: Li Zefan
CC: Kamezawa Hiroyuki
CC: Johannes Weiner
CC: Michal Hocko
---
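
For reference, here is a minimal, self-contained sketch of the
deferred-free pattern this patch adopts (illustration only, not part of
the patch to be applied; "struct foo", its allocation policy and the
foo_* names are hypothetical): call_rcu() waits out the grace period,
and the RCU callback then hands the actual kfree()/vfree() off to a
workqueue so that the free always runs in process context.

/*
 * Illustrative sketch only -- not part of this patch.  struct foo and
 * its allocation policy are hypothetical; the API calls are the ones
 * the patch itself relies on.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

struct foo {
	/* ... payload read under rcu_read_lock() by other CPUs ... */
	union {
		struct rcu_head rcu_freeing;	 /* used first, by call_rcu() */
		struct work_struct work_freeing; /* reused after the grace period */
	};
};

static void foo_free_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work_freeing);

	/* Process context: both kfree() and vfree() are legal here. */
	if (sizeof(struct foo) < PAGE_SIZE)
		kfree(f);
	else
		vfree(f);
}

static void foo_free_rcu(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu_freeing);

	/* Softirq context: vfree() is not allowed here, so always punt to a worker. */
	INIT_WORK(&f->work_freeing, foo_free_work);
	schedule_work(&f->work_freeing);
}

static void foo_release(struct foo *f)
{
	/* Readers may still hold RCU references; free only after a grace period. */
	call_rcu(&f->rcu_freeing, foo_free_rcu);
}

Overlaying work_freeing on rcu_freeing is safe because, by the time the
RCU callback runs, the rcu_head is no longer needed, so the same storage
can be reinitialized as a work item.
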
 mm/memcontrol.c | 24 +++++++++++++-----------
 1 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7832b4d..b0076cc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -245,8 +245,8 @@ struct mem_cgroup {
 		 */
 		struct rcu_head rcu_freeing;
 		/*
-		 * But when using vfree(), that cannot be done at
-		 * interrupt time, so we must then queue the work.
+		 * We also need some space for a worker in deferred freeing.
+		 * By the time we call it, rcu_freeing is no longer in use.
 		 */
 		struct work_struct work_freeing;
 	};
@@ -4826,23 +4826,28 @@ out_free:
 }
 
 /*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
  * but in process context.  The work_freeing structure is overlaid
  * on the rcu_freeing structure, which itself is overlaid on memsw.
  */
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
 {
 	struct mem_cgroup *memcg;
+	int size = sizeof(struct mem_cgroup);
 
 	memcg = container_of(work, struct mem_cgroup, work_freeing);
-	vfree(memcg);
+	if (size < PAGE_SIZE)
+		kfree(memcg);
+	else
+		vfree(memcg);
 }
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
 {
 	struct mem_cgroup *memcg;
 
 	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
-	INIT_WORK(&memcg->work_freeing, vfree_work);
+	INIT_WORK(&memcg->work_freeing, free_work);
 	schedule_work(&memcg->work_freeing);
 }
 
@@ -4868,10 +4873,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 		free_mem_cgroup_per_zone_info(memcg, node);
 
 	free_percpu(memcg->stat);
-	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-		kfree_rcu(memcg, rcu_freeing);
-	else
-		call_rcu(&memcg->rcu_freeing, vfree_rcu);
+	call_rcu(&memcg->rcu_freeing, free_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
-- 
1.7.7.6