From: Muchun Song <songmuchun@bytedance.com>
To: willy@infradead.org, akpm@linux-foundation.org,
	hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com,
	shakeelb@google.com, guro@fb.com, shy828301@gmail.com,
	alexs@kernel.org, richard.weiyang@gmail.com, david@fromorbit.com,
	trond.myklebust@hammerspace.com, anna.schumaker@netapp.com
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-nfs@vger.kernel.org,
	zhengqi.arch@bytedance.com, duanxiongchun@bytedance.com,
	fam.zheng@bytedance.com, smuchun@gmail.com,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v3 73/76] mm: memcontrol: reuse memory cgroup ID for kmem ID
Date: Tue, 14 Sep 2021 15:29:35 +0800
Message-ID: <20210914072938.6440-74-songmuchun@bytedance.com>
In-Reply-To: <20210914072938.6440-1-songmuchun@bytedance.com>

The memory cgroup code maintains two separate ID allocators: an IDA for
the kmem ID (memcg_cache_ida) and an IDR for the memory cgroup ID
(mem_cgroup_idr). Both ID spaces are capped at 64Ki entries, so either
one can limit the total number of memory cgroups. Since the memory
cgroup ID already identifies the cgroup uniquely, reuse it as the kmem
ID (the two differ only by a fixed offset, sketched below) and drop the
separate allocator to simplify the code.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/memcontrol.h |  1 +
 mm/memcontrol.c            | 47 ++++++++--------------------------------------
 2 files changed, 9 insertions(+), 39 deletions(-)
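
For reference, the core of the change is a fixed offset between the two
ID spaces rather than a second allocator. A minimal user-space sketch of
the mapping (the constants mirror the macros added in the diff below;
the helper and the loop are purely illustrative and not kernel code):

    #include <stdio.h>

    #define MEM_CGROUP_ID_MIN       1
    #define MEM_CGROUP_KMEM_ID_MIN  (-1)
    #define MEM_CGROUP_ID_DIFF      (MEM_CGROUP_ID_MIN - MEM_CGROUP_KMEM_ID_MIN)

    /* Derive the kmem ID from the memcg ID, as memcg_online_kmem() now does. */
    static int kmem_id_from_memcg_id(int memcg_id)
    {
            return memcg_id - MEM_CGROUP_ID_DIFF;
    }

    int main(void)
    {
            /*
             * The smallest memcg ID (1) maps to -1, the "unset" sentinel;
             * every later memcg ID maps to a unique non-negative kmem ID,
             * so no separate IDA is needed.
             */
            for (int id = MEM_CGROUP_ID_MIN; id <= 4; id++)
                    printf("memcg id %d -> kmemcg_id %d\n",
                           id, kmem_id_from_memcg_id(id));
            return 0;
    }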

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 83add6c484b1..33f6ec4783f8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -56,6 +56,7 @@ struct mem_cgroup_reclaim_cookie {
 #ifdef CONFIG_MEMCG
 
 #define MEM_CGROUP_ID_SHIFT	16
+#define MEM_CGROUP_ID_MIN	1
 #define MEM_CGROUP_ID_MAX	USHRT_MAX
 
 struct mem_cgroup_id {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8e0cde19b648..e3a2e4d65cc5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -356,23 +356,6 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 }
 
 /*
- * This will be used as a shrinker list's index.
- * The main reason for not using cgroup id for this:
- *  this works better in sparse environments, where we have a lot of memcgs,
- *  but only a few kmem-limited.
- */
-static DEFINE_IDA(memcg_cache_ida);
-
-/*
- * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
- * this constant directly from cgroup, but it is understandable that this is
- * better kept as an internal representation in cgroup.c. In any case, the
- * cgrp_id space is not getting any smaller, and we don't have to necessarily
- * increase ours as well if it increases.
- */
-#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
-
-/*
  * A lot of the calls to the cache allocation functions are expected to be
  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
@@ -3520,10 +3503,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+#define MEM_CGROUP_KMEM_ID_MIN	-1
+#define MEM_CGROUP_ID_DIFF	(MEM_CGROUP_ID_MIN - MEM_CGROUP_KMEM_ID_MIN)
+
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
 	struct obj_cgroup *objcg;
-	int memcg_id;
 
 	if (cgroup_memory_nokmem)
 		return 0;
@@ -3531,22 +3516,16 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
 	if (unlikely(mem_cgroup_is_root(memcg)))
 		return 0;
 
-	memcg_id = ida_alloc_max(&memcg_cache_ida, MEMCG_CACHES_MAX_SIZE - 1,
-				 GFP_KERNEL);
-	if (memcg_id < 0)
-		return memcg_id;
-
 	objcg = obj_cgroup_alloc();
-	if (!objcg) {
-		ida_free(&memcg_cache_ida, memcg_id);
+	if (!objcg)
 		return -ENOMEM;
-	}
+
 	objcg->memcg = memcg;
 	rcu_assign_pointer(memcg->objcg, objcg);
 
 	static_branch_enable(&memcg_kmem_enabled_key);
 
-	memcg->kmemcg_id = memcg_id;
+	memcg->kmemcg_id = memcg->id.id - MEM_CGROUP_ID_DIFF;
 
 	return 0;
 }
@@ -3554,7 +3533,6 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
 static void memcg_offline_kmem(struct mem_cgroup *memcg)
 {
 	struct mem_cgroup *parent;
-	int kmemcg_id;
 
 	if (cgroup_memory_nokmem)
 		return;
@@ -3567,16 +3545,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 		parent = root_mem_cgroup;
 
 	memcg_reparent_objcgs(memcg, parent);
-
-	/*
-	 * memcg_reparent_list_lrus() can change memcg->kmemcg_id.
-	 * Cache it to @kmemcg_id.
-	 */
-	kmemcg_id = memcg->kmemcg_id;
-
 	memcg_reparent_list_lrus(memcg, parent);
-
-	ida_free(&memcg_cache_ida, kmemcg_id);
 }
 #else
 static int memcg_online_kmem(struct mem_cgroup *memcg)
@@ -5042,7 +5011,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 		return ERR_PTR(error);
 
 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
-				 1, MEM_CGROUP_ID_MAX,
+				 MEM_CGROUP_ID_MIN, MEM_CGROUP_ID_MAX,
 				 GFP_KERNEL);
 	if (memcg->id.id < 0) {
 		error = memcg->id.id;
@@ -5070,7 +5039,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	spin_lock_init(&memcg->event_list_lock);
 	memcg->socket_pressure = jiffies;
 #ifdef CONFIG_MEMCG_KMEM
-	memcg->kmemcg_id = -1;
+	memcg->kmemcg_id = MEM_CGROUP_KMEM_ID_MIN;
 	INIT_LIST_HEAD(&memcg->objcg_list);
 #endif
 #ifdef CONFIG_CGROUP_WRITEBACK
-- 
2.11.0



Thread overview: 81+ messages
2021-09-14  7:28 [PATCH v3 00/76] Optimize list lru memory consumption Muchun Song
2021-09-14  7:28 ` [PATCH v3 01/76] mm: list_lru: fix the return value of list_lru_count_one() Muchun Song
2021-09-14  7:28 ` [PATCH v3 02/76] mm: memcontrol: remove kmemcg_id reparenting Muchun Song
2021-09-14  7:28 ` [PATCH v3 03/76] mm: memcontrol: remove the kmem states Muchun Song
2021-09-14  7:28 ` [PATCH v3 04/76] mm: memcontrol: move memcg_online_kmem() to mem_cgroup_css_online() Muchun Song
2021-09-14  7:28 ` [PATCH v3 05/76] mm: list_lru: remove holding lru lock Muchun Song
2021-09-14  7:28 ` [PATCH v3 06/76] mm: list_lru: only add memcg-aware lrus to the global lru list Muchun Song
2021-09-14  7:28 ` [PATCH v3 07/76] mm: list_lru: optimize memory consumption of arrays Muchun Song
2021-09-14  7:28 ` [PATCH v3 08/76] mm: introduce kmem_cache_alloc_lru Muchun Song
2021-09-14  7:28 ` [PATCH v3 09/76] fs: introduce alloc_inode_sb() to allocate filesystems specific inode Muchun Song
2021-09-14  7:28 ` [PATCH v3 10/76] dax: allocate inode by using alloc_inode_sb() Muchun Song
2021-09-14  7:28 ` [PATCH v3 11/76] 9p: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 12/76] adfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 13/76] affs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 14/76] afs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 15/76] befs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 16/76] bfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 17/76] block: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 18/76] btrfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 19/76] ceph: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 20/76] cifs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 21/76] coda: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 22/76] ecryptfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 23/76] efs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 24/76] erofs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 25/76] exfat: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 26/76] ext2: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 27/76] ext4: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 28/76] fat: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 29/76] freevxfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 30/76] fuse: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 31/76] gfs2: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 32/76] hfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 33/76] hfsplus: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 34/76] hostfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 35/76] hpfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 36/76] hugetlbfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 37/76] isofs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 38/76] jffs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 39/76] jfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 40/76] minix: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 41/76] nfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 42/76] nilfs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 43/76] ntfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 44/76] ocfs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 45/76] openpromfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 46/76] orangefs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 47/76] overlayfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 48/76] proc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 49/76] qnx4: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 50/76] qnx6: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 51/76] reiserfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 52/76] romfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 53/76] squashfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 54/76] sysv: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 55/76] ubifs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 56/76] udf: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 57/76] ufs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 58/76] vboxsf: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 59/76] xfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 60/76] zonefs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 61/76] ipc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 62/76] shmem: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 63/76] net: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 64/76] rpc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 65/76] f2fs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 66/76] nfs42: use a specific kmem_cache to allocate nfs4_xattr_entry Muchun Song
2021-09-14  7:29 ` [PATCH v3 67/76] mm: dcache: use kmem_cache_alloc_lru() to allocate dentry Muchun Song
2021-09-14  7:29 ` [PATCH v3 68/76] xarray: use kmem_cache_alloc_lru to allocate xa_node Muchun Song
2021-09-14  7:29 ` [PATCH v3 69/76] mm: workingset: use xas_set_lru() to pass shadow_nodes Muchun Song
2021-09-14  7:29 ` [PATCH v3 70/76] mm: list_lru: allocate list_lru_one only when needed Muchun Song
2021-09-14  7:29 ` [PATCH v3 71/76] mm: list_lru: rename memcg_drain_all_list_lrus to memcg_reparent_list_lrus Muchun Song
2021-09-14  7:29 ` [PATCH v3 72/76] mm: list_lru: replace linear array with xarray Muchun Song
2021-09-14  7:29 ` [PATCH v3 73/76] mm: memcontrol: reuse memory cgroup ID for kmem ID Muchun Song [this message]
2021-09-14  7:29 ` [PATCH v3 74/76] mm: memcontrol: fix cannot alloc the maximum memcg ID Muchun Song
2021-09-14  7:29 ` [PATCH v3 75/76] mm: list_lru: rename list_lru_per_memcg to list_lru_memcg Muchun Song
2021-09-14  7:29 ` [PATCH v3 76/76] mm: memcontrol: rename memcg_cache_id to memcg_kmem_id Muchun Song
2021-09-14 20:22 ` [PATCH v3 00/76] Optimize list lru memory consumption Theodore Ts'o
2021-09-15  7:30   ` Muchun Song
2021-09-18  6:56 ` Kari Argillander
2021-09-18  7:59   ` Muchun Song
