From mboxrd@z Thu Jan 1 00:00:00 1970
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751538AbdANFzY (ORCPT ); Sat, 14 Jan 2017 00:55:24 -0500
Received: from mail-pg0-f65.google.com ([74.125.83.65]:35846 "EHLO mail-pg0-f65.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751406AbdANFzK (ORCPT ); Sat, 14 Jan 2017 00:55:10 -0500
From: Tejun Heo <tj@kernel.org>
To: vdavydov.dev@gmail.com, cl@linux.com, penberg@kernel.org, rientjes@google.com, iamjoonsoo.kim@lge.com, akpm@linux-foundation.org
Cc: jsvana@fb.com, hannes@cmpxchg.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, cgroups@vger.kernel.org, kernel-team@fb.com, Tejun Heo <tj@kernel.org>
Subject: [PATCH 7/9] slab: introduce __kmemcg_cache_deactivate()
Date: Sat, 14 Jan 2017 00:54:47 -0500
Message-Id: <20170114055449.11044-8-tj@kernel.org>
X-Mailer: git-send-email 2.9.3
In-Reply-To: <20170114055449.11044-1-tj@kernel.org>
References: <20170114055449.11044-1-tj@kernel.org>
Sender: linux-kernel-owner@vger.kernel.org
X-Mailing-List: linux-kernel@vger.kernel.org

__kmem_cache_shrink() is called with %true @deactivate only for memcg
caches.  Remove @deactivate from __kmem_cache_shrink() and introduce
__kmemcg_cache_deactivate() instead.  Each memcg-supporting allocator
should implement it and it should deactivate and drain the cache.

This is to allow memcg cache deactivation behavior to further deviate
from simple shrinking without messing up __kmem_cache_shrink().

This is pure reorganization and doesn't introduce any observable
behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
---
 mm/slab.c        | 11 +++++++++--
 mm/slab.h        |  5 ++++-
 mm/slab_common.c |  4 ++--
 mm/slob.c        |  2 +-
 mm/slub.c        | 39 ++++++++++++++++++++++-----------------
 5 files changed, 38 insertions(+), 23 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 767e8e4..65814f2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2314,7 +2314,7 @@ static int drain_freelist(struct kmem_cache *cache,
         return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
         int ret = 0;
         int node;
@@ -2332,9 +2332,16 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
         return (ret ? 1 : 0);
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
+{
+        __kmem_cache_shrink(cachep);
+}
+#endif
+
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-        return __kmem_cache_shrink(cachep, false);
+        return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index 8f47a44..73ed6b5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -164,7 +164,10 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+void __kmemcg_cache_deactivate(struct kmem_cache *s);
+#endif
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c0d0126..87e5535 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -602,7 +602,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
                 if (!c)
                         continue;
 
-                __kmem_cache_shrink(c, true);
+                __kmemcg_cache_deactivate(c);
                 arr->entries[idx] = NULL;
         }
         mutex_unlock(&slab_mutex);
@@ -727,7 +727,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
         get_online_cpus();
         get_online_mems();
         kasan_cache_shrink(cachep);
-        ret = __kmem_cache_shrink(cachep, false);
+        ret = __kmem_cache_shrink(cachep);
         put_online_mems();
         put_online_cpus();
         return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 5ec1580..eac04d4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
         return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
index a26cb90..ef89a07 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3886,7 +3886,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
         int node;
         int i;
@@ -3898,21 +3898,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
         unsigned long flags;
         int ret = 0;
 
-        if (deactivate) {
-                /*
-                 * Disable empty slabs caching. Used to avoid pinning offline
-                 * memory cgroups by kmem pages that can be freed.
-                 */
-                s->cpu_partial = 0;
-                s->min_partial = 0;
-
-                /*
-                 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-                 * so we have to make sure the change is visible.
-                 */
-                synchronize_sched();
-        }
-
         flush_all(s);
         for_each_kmem_cache_node(s, node, n) {
                 INIT_LIST_HEAD(&discard);
@@ -3963,13 +3948,33 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
         return ret;
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+        /*
+         * Disable empty slabs caching. Used to avoid pinning offline
+         * memory cgroups by kmem pages that can be freed.
+         */
+        s->cpu_partial = 0;
+        s->min_partial = 0;
+
+        /*
+         * s->cpu_partial is checked locklessly (see put_cpu_partial), so
+         * we have to make sure the change is visible.
+         */
+        synchronize_sched();
+
+        __kmem_cache_shrink(s);
+}
+#endif
+
 static int slab_mem_going_offline_callback(void *arg)
 {
         struct kmem_cache *s;
 
         mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list)
-                __kmem_cache_shrink(s, false);
+                __kmem_cache_shrink(s);
         mutex_unlock(&slab_mutex);
 
         return 0;
-- 
2.9.3
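
As a rough illustration of the split this patch makes, here is a stand-alone, user-space toy model (the struct and function names below are invented for the example and are not kernel APIs): plain shrinking stays a no-argument operation, while memcg deactivation zeroes the caching knobs and then reuses the shrink path.

#include <stdio.h>

/*
 * Toy model only; the types and names are illustrative, not the
 * kernel's.  Fields mirror the knobs the patch touches.
 */
struct toy_cache {
        const char *name;
        int cpu_partial;     /* stand-in for s->cpu_partial */
        int min_partial;     /* stand-in for s->min_partial */
        int empty_pages;     /* stand-in for cached empty slabs */
};

/* Analogue of __kmem_cache_shrink(): drop empty slabs, no mode flag. */
static int toy_cache_shrink(struct toy_cache *c)
{
        c->empty_pages = 0;
        printf("%s: shrunk\n", c->name);
        return 0;
}

/*
 * Analogue of __kmemcg_cache_deactivate(): disable caching of empty
 * slabs first, then drain by reusing the plain shrink path.  (The real
 * SLUB version also calls synchronize_sched() between the two steps.)
 */
static void toy_cache_deactivate(struct toy_cache *c)
{
        c->cpu_partial = 0;
        c->min_partial = 0;
        toy_cache_shrink(c);
        printf("%s: deactivated\n", c->name);
}

int main(void)
{
        struct toy_cache c = { "demo-cache", 30, 5, 12 };

        toy_cache_shrink(&c);     /* kmem_cache_shrink() path: knobs untouched */
        toy_cache_deactivate(&c); /* memcg offline path: knobs zeroed, then drained */
        return 0;
}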