From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mail-pl0-f71.google.com (mail-pl0-f71.google.com [209.85.160.71])
	by kanga.kvack.org (Postfix) with ESMTP id F1A086B05A4
	for ; Fri, 18 May 2018 04:42:26 -0400 (EDT)
Received: by mail-pl0-f71.google.com with SMTP id e1-v6so3220041pld.23
	for ; Fri, 18 May 2018 01:42:26 -0700 (PDT)
Received: from EUR01-VE1-obe.outbound.protection.outlook.com
	(mail-ve1eur01on0105.outbound.protection.outlook.com. [104.47.1.105])
	by mx.google.com with ESMTPS id f91-v6si7113754plb.510.2018.05.18.01.42.25
	for (version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);
	Fri, 18 May 2018 01:42:25 -0700 (PDT)
Subject: [PATCH v6 03/17] mm: Assign id to every memcg-aware shrinker
From: Kirill Tkhai 
Date: Fri, 18 May 2018 11:42:08 +0300
Message-ID: <152663292827.5308.15476497557507652648.stgit@localhost.localdomain>
In-Reply-To: <152663268383.5308.8660992135988724014.stgit@localhost.localdomain>
References: <152663268383.5308.8660992135988724014.stgit@localhost.localdomain>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
Sender: owner-linux-mm@kvack.org
List-ID: 
To: akpm@linux-foundation.org, vdavydov.dev@gmail.com, shakeelb@google.com,
	viro@zeniv.linux.org.uk, hannes@cmpxchg.org, mhocko@kernel.org,
	ktkhai@virtuozzo.com, tglx@linutronix.de, pombredanne@nexb.com,
	stummala@codeaurora.org, gregkh@linuxfoundation.org, sfr@canb.auug.org.au,
	guro@fb.com, mka@chromium.org, penguin-kernel@I-love.SAKURA.ne.jp,
	chris@chris-wilson.co.uk, longman@redhat.com, minchan@kernel.org,
	ying.huang@intel.com, mgorman@techsingularity.net, jbacik@fb.com,
	linux@roeck-us.net, linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	willy@infradead.org, lirongqing@baidu.com, aryabinin@virtuozzo.com

The patch introduces the shrinker::id number, which is used to enumerate
memcg-aware shrinkers. The number starts from 0, and the code tries to
keep it as small as possible. It will be used to represent a memcg-aware
shrinker in the per-memcg shrinkers map. Since all memcg-aware shrinkers
are based on list_lru, which is per-memcg in case of CONFIG_MEMCG_KMEM
only, the new functionality is put under this config option.
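For reference (illustration only, not part of the patch), the "keep it as
small as possible" property comes from idr_alloc(): with end == 0 there is
no upper bound and the call returns the smallest free id at or above start,
so ids released by unregistered shrinkers are reused and the id space stays
dense. A minimal sketch, with a made-up idr name:

#include <linux/idr.h>

static DEFINE_IDR(example_idr);

/* Illustration only: returns the lowest unused id >= 0, or -errno. */
static int example_alloc_lowest_id(void *ptr)
{
	return idr_alloc(&example_idr, ptr, 0, 0, GFP_KERNEL);
}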
Signed-off-by: Kirill Tkhai 
---
 include/linux/shrinker.h |    4 ++++
 mm/vmscan.c              |   60 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6794490f25b2..7ca9c18cf130 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -66,6 +66,10 @@ struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+	/* ID in shrinker_idr */
+	int id;
+#endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 50055d72f294..3de12a9bdf85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,6 +169,48 @@ unsigned long vm_total_pages;
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_MEMCG_KMEM
+static DEFINE_IDR(shrinker_idr);
+
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id, ret;
+
+	shrinker->id = -1;
+	down_write(&shrinker_rwsem);
+	ret = id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto unlock;
+	shrinker->id = id;
+	ret = 0;
+unlock:
+	up_write(&shrinker_rwsem);
+	return ret;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id = shrinker->id;
+
+	if (id < 0)
+		return;
+
+	down_write(&shrinker_rwsem);
+	idr_remove(&shrinker_idr, id);
+	up_write(&shrinker_rwsem);
+	shrinker->id = -1;
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
@@ -306,6 +348,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
 int prealloc_shrinker(struct shrinker *shrinker)
 {
 	size_t size = sizeof(*shrinker->nr_deferred);
+	int ret;
 
 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
 		size *= nr_node_ids;
@@ -313,11 +356,26 @@ int prealloc_shrinker(struct shrinker *shrinker)
 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
+
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+		ret = prealloc_memcg_shrinker(shrinker);
+		if (ret)
+			goto free_deferred;
+	}
+
 	return 0;
+
+free_deferred:
+	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
+	return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
+
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
 }
@@ -347,6 +405,8 @@ void unregister_shrinker(struct shrinker *shrinker)
 {
 	if (!shrinker->nr_deferred)
 		return;
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
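For context, a sketch of how a caller would pick up an id with this change
(illustration only, not part of the patch; the shrinker and its callbacks
are made up for the example). A SHRINKER_MEMCG_AWARE user does not need any
new calls: the id is assigned in prealloc_shrinker() and released again in
unregister_shrinker() or free_prealloced_shrinker():

#include <linux/module.h>
#include <linux/shrinker.h>

/* Hypothetical example shrinker, not part of this patch. */
static unsigned long example_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	return 0;	/* nothing to reclaim in this sketch */
}

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static int __init example_init(void)
{
	/* prealloc_shrinker() now also assigns example_shrinker.id */
	int ret = prealloc_shrinker(&example_shrinker);

	if (ret)
		return ret;
	register_shrinker_prepared(&example_shrinker);
	return 0;
}

static void __exit example_exit(void)
{
	/* returns the id to shrinker_idr for reuse */
	unregister_shrinker(&example_shrinker);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");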