From: Vladimir Davydov <vdavydov@parallels.com>
To: <akpm@linux-foundation.org>
Cc: <cl@linux.com>, <iamjoonsoo.kim@lge.com>, <rientjes@google.com>,
	<penberg@kernel.org>, <hannes@cmpxchg.org>, <mhocko@suse.cz>,
	<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>
Subject: [PATCH -mm v3 2/8] memcg: destroy kmem caches when last slab is freed
Date: Fri, 13 Jun 2014 00:38:16 +0400
Message-ID: <6884cccbbf6382c91a9ce2d26f922b2102af441f.1402602126.git.vdavydov@parallels.com>
In-Reply-To: <cover.1402602126.git.vdavydov@parallels.com>

When memcg_cache_params->refcnt drops to 0, schedule a worker to
unregister the cache. To prevent this from happening while the owner
memcg is still alive, keep the refcnt incremented for the whole memcg
lifetime.
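
Roughly, the intended refcnt lifecycle looks as follows (a simplified
sketch; the charge-side increment is assumed to live in
__memcg_charge_slab, as arranged by the previous patch, while the other
sites are visible in the diff below):

	memcg_alloc_cache_params():  refcnt = 1;        /* ref held by the live memcg    */
	__memcg_charge_slab():       refcnt++;          /* one ref per charged slab page */
	__memcg_uncharge_slab():     if (--refcnt == 0)
	                                 schedule_work(&unregister_work);
	memcg offline:               if (--refcnt == 0)  /* drop the memcg's own ref */
	                                 memcg_unregister_cache(cachep);

The work item is presumably needed because the uncharge path may run in
a context that cannot take memcg_slab_mutex or call kmem_cache_destroy,
which may sleep; the worker defers the actual destruction to process
context.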

Note that this doesn't guarantee that a cache belonging to a dead memcg
will go away as soon as its last object is freed, because the SL[AU]B
implementations may keep empty slabs around for performance reasons.
Hence the cache may hang around indefinitely after the memcg goes
offline. This is to be resolved by the following patches.
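
For illustration only (memcg_cachep and p are hypothetical names, and
this snippet is not part of the patch), with SLUB a dead memcg's cache
can end up in the following state:

	/* the memcg owning memcg_cachep has already been taken offline */
	p = kmem_cache_alloc(memcg_cachep, GFP_KERNEL); /* may charge a new slab page */
	kmem_cache_free(memcg_cachep, p);               /* the slab is now empty ...  */
	/*
	 * ... but SLUB may keep the empty slab on a per-cpu or per-node
	 * partial list instead of freeing the page, so __memcg_uncharge_slab()
	 * is not called, refcnt stays above zero and the dead cache lingers
	 * until the slab is eventually discarded, e.g. by kmem_cache_shrink().
	 */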

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
---
 include/linux/slab.h |    2 ++
 mm/memcontrol.c      |   22 ++++++++++++++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1985bd9bec7d..d9716fdc8211 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -527,6 +527,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * @list: list_head for the list of all caches in this memcg
  * @root_cache: pointer to the global, root cache, this cache was derived from
  * @refcnt: reference counter
+ * @unregister_work: worker to destroy the cache
  */
 struct memcg_cache_params {
 	bool is_root_cache;
@@ -540,6 +541,7 @@ struct memcg_cache_params {
 			struct list_head list;
 			struct kmem_cache *root_cache;
 			atomic_long_t refcnt;
+			struct work_struct unregister_work;
 		};
 	};
 };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 98a24e5ea4b5..886b5b414958 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3114,6 +3114,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 	return 0;
 }
 
+static void memcg_unregister_cache_func(struct work_struct *work);
+
 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
 			     struct kmem_cache *root_cache)
 {
@@ -3135,6 +3137,9 @@ int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		atomic_long_set(&s->memcg_params->refcnt, 1);
+		INIT_WORK(&s->memcg_params->unregister_work,
+			  memcg_unregister_cache_func);
 		css_get(&memcg->css);
 	} else
 		s->memcg_params->is_root_cache = true;
@@ -3216,6 +3221,17 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
 	kmem_cache_destroy(cachep);
 }
 
+static void memcg_unregister_cache_func(struct work_struct *work)
+{
+	struct memcg_cache_params *params =
+		container_of(work, struct memcg_cache_params, unregister_work);
+	struct kmem_cache *cachep = memcg_params_to_cache(params);
+
+	mutex_lock(&memcg_slab_mutex);
+	memcg_unregister_cache(cachep);
+	mutex_unlock(&memcg_slab_mutex);
+}
+
 /*
  * During the creation a new cache, we need to disable our accounting mechanism
  * altogether. This is true even if we are not creating, but rather just
@@ -3279,7 +3295,7 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
 	list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
 		kmem_cache_shrink(cachep);
-		if (atomic_long_read(&cachep->memcg_params->refcnt) == 0)
+		if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
 			memcg_unregister_cache(cachep);
 	}
 	mutex_unlock(&memcg_slab_mutex);
@@ -3360,7 +3376,9 @@ int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
 void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
 {
 	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
-	atomic_long_dec(&cachep->memcg_params->refcnt);
+
+	if (unlikely(atomic_long_dec_and_test(&cachep->memcg_params->refcnt)))
+		schedule_work(&cachep->memcg_params->unregister_work);
 }
 
 /*
-- 
1.7.10.4

