From mboxrd@z Thu Jan 1 00:00:00 1970
From: Glauber Costa
Subject: [PATCH v5 30/31] memcg: reap dead memcgs upon global memory pressure.
Date: Thu, 9 May 2013 10:06:47 +0400
Message-ID: <1368079608-5611-31-git-send-email-glommer@openvz.org>
In-Reply-To: <1368079608-5611-1-git-send-email-glommer@openvz.org>
References: <1368079608-5611-1-git-send-email-glommer@openvz.org>
To: linux-mm@kvack.org
Cc: Andrew Morton, Mel Gorman, cgroups@vger.kernel.org,
    kamezawa.hiroyu@jp.fujitsu.com, Johannes Weiner, Michal Hocko,
    hughd@google.com, Greg Thelen, linux-fsdevel@vger.kernel.org,
    Glauber Costa, Dave Chinner, Rik van Riel

When we delete kmem-enabled memcgs, they can still linger around as
zombies for a while: their objects may still be alive, so we cannot
free the memcg at destruction time. The only entry point we have for
reclaiming those objects is the shrinkers.

The shrinker interface, however, is not exactly tailored to our needs.
It would fit a little better with the count-and-scan API Dave Chinner
proposed, but it is still not ideal: we are not really a count-and-scan
consumer, but rather a one-off "flush all you can" event, and we would
have to abuse that interface to make it work.

Instead, this patch registers a vmpressure kernel event when the root
memcg comes online. When global memory pressure builds, the callback
walks the list of dangling (dead) memcgs and calls kmem_cache_shrink()
on each of their per-memcg slab caches.
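For reference, the count-and-scan shrinker model mentioned above looks
roughly like the sketch below. This is illustrative only: the callback
names follow the proposed API, but the exact signatures may differ from
what eventually lands.

/*
 * Illustrative sketch only -- not part of this patch.  A count-and-scan
 * shrinker is asked two separate questions by the VM:
 */
struct shrink_control;	/* carries nr_to_scan, gfp mask, etc. */

struct shrinker {
	/* "how many objects could you free right now?" */
	unsigned long (*count_objects)(struct shrinker *shrink,
				       struct shrink_control *sc);
	/* "free up to sc->nr_to_scan of them; return how many were freed" */
	unsigned long (*scan_objects)(struct shrinker *shrink,
				      struct shrink_control *sc);
	int seeks;	/* relative cost of recreating the objects */
	long batch;	/* objects to scan per call */
};

A dead memcg has no cheap, meaningful count to report, and what we
really want is a single flush-everything pass, so the reaping is hooked
into vmpressure instead (memcg_vmpressure_shrink_dead() below).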
Signed-off-by: Glauber Costa
Cc: Dave Chinner
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Hugh Dickins
Cc: Kamezawa Hiroyuki
Cc: Andrew Morton
---
 mm/memcontrol.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 73 insertions(+), 3 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1ff72f9..fc3a8d5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -319,8 +319,16 @@ struct mem_cgroup {
 	/* thresholds for mem+swap usage. RCU-protected */
 	struct mem_cgroup_thresholds memsw_thresholds;
 
-	/* For oom notifier event fd */
-	struct list_head oom_notify;
+	union {
+		/* For oom notifier event fd */
+		struct list_head oom_notify;
+		/*
+		 * we can only trigger an oom event if the memcg is alive.
+		 * so we will reuse this field to hook the memcg in the list
+		 * of dead memcgs.
+		 */
+		struct list_head dead;
+	};
 
 	/*
 	 * Should we move charges of a task when a task is moved into this
@@ -383,6 +391,24 @@ static size_t memcg_size(void)
 
 static DEFINE_MUTEX(set_limit_mutex);
 
+static LIST_HEAD(dangling_memcgs);
+static DEFINE_MUTEX(dangling_memcgs_mutex);
+
+static inline void memcg_dangling_free(struct mem_cgroup *memcg)
+{
+	mutex_lock(&dangling_memcgs_mutex);
+	list_del(&memcg->dead);
+	mutex_unlock(&dangling_memcgs_mutex);
+}
+
+static inline void memcg_dangling_add(struct mem_cgroup *memcg)
+{
+	INIT_LIST_HEAD(&memcg->dead);
+	mutex_lock(&dangling_memcgs_mutex);
+	list_add(&memcg->dead, &dangling_memcgs);
+	mutex_unlock(&dangling_memcgs_mutex);
+}
+
 /* internal only representation about the status of kmem accounting. */
 enum {
 	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
@@ -6115,6 +6141,41 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+static void memcg_vmpressure_shrink_dead(void)
+{
+	struct memcg_cache_params *params, *tmp;
+	struct kmem_cache *cachep;
+	struct mem_cgroup *memcg;
+
+	mutex_lock(&dangling_memcgs_mutex);
+	list_for_each_entry(memcg, &dangling_memcgs, dead) {
+		mutex_lock(&memcg->slab_caches_mutex);
+		/* The element may go away as an indirect result of shrink */
+		list_for_each_entry_safe(params, tmp,
+					 &memcg->memcg_slab_caches, list) {
+			cachep = memcg_params_to_cache(params);
+			/*
+			 * the cpu_hotplug lock is taken in kmem_cache_create
+			 * outside the slab_caches_mutex manipulation. It will
+			 * be taken by kmem_cache_shrink to flush the cache.
+			 * So we need to drop the lock. It is all right because
+			 * the lock only protects elements moving in and out the
+			 * list.
+			 */
+			mutex_unlock(&memcg->slab_caches_mutex);
+			kmem_cache_shrink(cachep);
+			mutex_lock(&memcg->slab_caches_mutex);
+		}
+		mutex_unlock(&memcg->slab_caches_mutex);
+	}
+	mutex_unlock(&dangling_memcgs_mutex);
+}
+
+static void memcg_register_kmem_events(struct cgroup *cont)
+{
+	vmpressure_register_kernel_event(cont, memcg_vmpressure_shrink_dead);
+}
+
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	int ret;
@@ -6150,6 +6211,10 @@ static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 	}
 }
 #else
+static inline void memcg_register_kmem_events(struct cgroup *cont)
+{
+}
+
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	return 0;
@@ -6415,6 +6480,8 @@ static void free_work(struct work_struct *work)
 	struct mem_cgroup *memcg;
 
 	memcg = container_of(work, struct mem_cgroup, work_freeing);
+
+	memcg_dangling_free(memcg);
 	__mem_cgroup_free(memcg);
 }
 
@@ -6525,8 +6592,10 @@ mem_cgroup_css_online(struct cgroup *cont)
 	struct mem_cgroup *memcg, *parent;
 	int error = 0;
 
-	if (!cont->parent)
+	if (!cont->parent) {
+		memcg_register_kmem_events(cont);
 		return 0;
+	}
 
 	mutex_lock(&memcg_create_mutex);
 	memcg = mem_cgroup_from_cont(cont);
@@ -6609,6 +6678,7 @@ static void mem_cgroup_css_free(struct cgroup *cont)
 
 	kmem_cgroup_destroy(memcg);
 
+	memcg_dangling_add(memcg);
 	mem_cgroup_put(memcg);
 }
 
-- 
1.8.1.4