From: Muchun Song <songmuchun@bytedance.com>
To: willy@infradead.org, akpm@linux-foundation.org,
	hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com,
	shakeelb@google.com, guro@fb.com, shy828301@gmail.com,
	alexs@kernel.org, richard.weiyang@gmail.com, david@fromorbit.com,
	trond.myklebust@hammerspace.com, anna.schumaker@netapp.com
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-nfs@vger.kernel.org,
	zhengqi.arch@bytedance.com, duanxiongchun@bytedance.com,
	fam.zheng@bytedance.com, smuchun@gmail.com,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v3 07/76] mm: list_lru: optimize memory consumption of arrays
Date: Tue, 14 Sep 2021 15:28:29 +0800	[thread overview]
Message-ID: <20210914072938.6440-8-songmuchun@bytedance.com> (raw)
In-Reply-To: <20210914072938.6440-1-songmuchun@bytedance.com>

The list_lru uses an array to store the list_lru_one pointers, and the
lists are per-memcg per-node: every list_lru_node carries one pointer
slot per memcg. What if we run 10k containers in the system? The size
of this array for every list_lru can then be 10k * number_of_nodes *
sizeof(void *), so the more NUMA nodes in the system, the more memory
it consumes. Convert the array from per-memcg per-node to per-memcg,
with each entry embedding its per-node lists. This saves memory,
especially when there are many NUMA nodes in the system, and also
simplifies the code. In my test case (10k memory cgroups on a 2-node
NUMA machine), it saves about 2.5GB of memory.
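
To illustrate the layout change, here is a simplified sketch of the
structures (not the full kernel definitions): the per-memcg pointer
array moves from each list_lru_node into the list_lru itself, and
every per-memcg entry now embeds its per-node lists directly.

	/* Before: one pointer array per NUMA node. */
	struct list_lru_node {
		/* nr_memcg slots, one list_lru_one pointer per memcg */
		struct list_lru_memcg __rcu *memcg_lrus;
		/* ... */
	};

	/* After: a single pointer array per list_lru. */
	struct list_lru_per_memcg {
		/* per cgroup per node lists, indexed by node id */
		struct list_lru_one nodes[0];
	};

	struct list_lru {
		/* nr_memcg slots, one list_lru_per_memcg per memcg */
		struct list_lru_memcg __rcu *memcg_lrus;
		/* ... */
	};

The pointer-array overhead per list_lru thus drops from roughly
nr_memcgs * nr_node_ids * sizeof(void *) to nr_memcgs * sizeof(void *),
and a lookup becomes &memcg_lrus->lrus[idx]->nodes[nid] instead of
indexing a separate per-node array via memcg_lrus->lru[idx].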

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/list_lru.h |  17 +++--
 mm/list_lru.c            | 191 +++++++++++++++++------------------------------
 2 files changed, 79 insertions(+), 129 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 1b5fceb565df..2b32dbd89214 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -31,10 +31,15 @@ struct list_lru_one {
 	long			nr_items;
 };
 
+struct list_lru_per_memcg {
+	/* array of per cgroup per node lists, indexed by node id */
+	struct list_lru_one	nodes[0];
+};
+
 struct list_lru_memcg {
-	struct rcu_head		rcu;
+	struct rcu_head			rcu;
 	/* array of per cgroup lists, indexed by memcg_cache_id */
-	struct list_lru_one	*lru[];
+	struct list_lru_per_memcg	*lrus[];
 };
 
 struct list_lru_node {
@@ -42,11 +47,7 @@ struct list_lru_node {
 	spinlock_t		lock;
 	/* global list, used for the root cgroup in cgroup aware lrus */
 	struct list_lru_one	lru;
-#ifdef CONFIG_MEMCG_KMEM
-	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
-	struct list_lru_memcg	__rcu *memcg_lrus;
-#endif
-	long nr_items;
+	long			nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
@@ -55,6 +56,8 @@ struct list_lru {
 	struct list_head	list;
 	int			shrinker_id;
 	bool			memcg_aware;
+	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+	struct list_lru_memcg	__rcu *memcg_lrus;
 #endif
 };
 
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 39828632631c..f1c73b53af9a 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -49,35 +49,38 @@ static int lru_shrinker_id(struct list_lru *lru)
 }
 
 static inline struct list_lru_one *
-list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 {
 	struct list_lru_memcg *memcg_lrus;
+	struct list_lru_node *nlru = &lru->node[nid];
+
 	/*
 	 * Either lock or RCU protects the array of per cgroup lists
-	 * from relocation (see memcg_update_list_lru_node).
+	 * from relocation (see memcg_update_list_lru).
 	 */
-	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
+	memcg_lrus = rcu_dereference_check(lru->memcg_lrus,
 					   lockdep_is_held(&nlru->lock));
 	if (memcg_lrus && idx >= 0)
-		return memcg_lrus->lru[idx];
+		return &memcg_lrus->lrus[idx]->nodes[nid];
 	return &nlru->lru;
 }
 
 static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
+list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
 		   struct mem_cgroup **memcg_ptr)
 {
+	struct list_lru_node *nlru = &lru->node[nid];
 	struct list_lru_one *l = &nlru->lru;
 	struct mem_cgroup *memcg = NULL;
 
-	if (!nlru->memcg_lrus)
+	if (!lru->memcg_lrus)
 		goto out;
 
 	memcg = mem_cgroup_from_obj(ptr);
 	if (!memcg)
 		goto out;
 
-	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
+	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
 out:
 	if (memcg_ptr)
 		*memcg_ptr = memcg;
@@ -103,18 +106,18 @@ static inline bool list_lru_memcg_aware(struct list_lru *lru)
 }
 
 static inline struct list_lru_one *
-list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 {
-	return &nlru->lru;
+	return &lru->node[nid].lru;
 }
 
 static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
+list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
 		   struct mem_cgroup **memcg_ptr)
 {
 	if (memcg_ptr)
 		*memcg_ptr = NULL;
-	return &nlru->lru;
+	return &lru->node[nid].lru;
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
@@ -127,7 +130,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 
 	spin_lock(&nlru->lock);
 	if (list_empty(item)) {
-		l = list_lru_from_kmem(nlru, item, &memcg);
+		l = list_lru_from_kmem(lru, nid, item, &memcg);
 		list_add_tail(item, &l->list);
 		/* Set shrinker bit if the first element was added */
 		if (!l->nr_items++)
@@ -150,7 +153,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 
 	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
-		l = list_lru_from_kmem(nlru, item, NULL);
+		l = list_lru_from_kmem(lru, nid, item, NULL);
 		list_del_init(item);
 		l->nr_items--;
 		nlru->nr_items--;
@@ -180,12 +183,11 @@ EXPORT_SYMBOL_GPL(list_lru_isolate_move);
 unsigned long list_lru_count_one(struct list_lru *lru,
 				 int nid, struct mem_cgroup *memcg)
 {
-	struct list_lru_node *nlru = &lru->node[nid];
 	struct list_lru_one *l;
 	long count;
 
 	rcu_read_lock();
-	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
+	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
 	count = READ_ONCE(l->nr_items);
 	rcu_read_unlock();
 
@@ -206,16 +208,16 @@ unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
 static unsigned long
-__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
+__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
 		    list_lru_walk_cb isolate, void *cb_arg,
 		    unsigned long *nr_to_walk)
 {
-
+	struct list_lru_node *nlru = &lru->node[nid];
 	struct list_lru_one *l;
 	struct list_head *item, *n;
 	unsigned long isolated = 0;
 
-	l = list_lru_from_memcg_idx(nlru, memcg_idx);
+	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
 restart:
 	list_for_each_safe(item, n, &l->list) {
 		enum lru_status ret;
@@ -272,8 +274,8 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 	unsigned long ret;
 
 	spin_lock(&nlru->lock);
-	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
-				  nr_to_walk);
+	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
+				  cb_arg, nr_to_walk);
 	spin_unlock(&nlru->lock);
 	return ret;
 }
@@ -288,8 +290,8 @@ list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 	unsigned long ret;
 
 	spin_lock_irq(&nlru->lock);
-	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
-				  nr_to_walk);
+	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
+				  cb_arg, nr_to_walk);
 	spin_unlock_irq(&nlru->lock);
 	return ret;
 }
@@ -308,7 +310,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 			struct list_lru_node *nlru = &lru->node[nid];
 
 			spin_lock(&nlru->lock);
-			isolated += __list_lru_walk_one(nlru, memcg_idx,
+			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
 							isolate, cb_arg,
 							nr_to_walk);
 			spin_unlock(&nlru->lock);
@@ -328,167 +330,112 @@ static void init_one_lru(struct list_lru_one *l)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
-					  int begin, int end)
+static void memcg_destroy_list_lru_range(struct list_lru_memcg *memcg_lrus,
+					 int begin, int end)
 {
 	int i;
 
 	for (i = begin; i < end; i++)
-		kfree(memcg_lrus->lru[i]);
+		kfree(memcg_lrus->lrus[i]);
 }
 
-static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
-				      int begin, int end)
+static int memcg_init_list_lru_range(struct list_lru_memcg *memcg_lrus,
+				     int begin, int end)
 {
 	int i;
 
 	for (i = begin; i < end; i++) {
-		struct list_lru_one *l;
+		int nid;
+		struct list_lru_per_memcg *lru;
 
-		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
-		if (!l)
+		lru = kmalloc(struct_size(lru, nodes, nr_node_ids), GFP_KERNEL);
+		if (!lru)
 			goto fail;
 
-		init_one_lru(l);
-		memcg_lrus->lru[i] = l;
+		for_each_node(nid)
+			init_one_lru(&lru->nodes[nid]);
+		memcg_lrus->lrus[i] = lru;
 	}
 	return 0;
 fail:
-	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
+	memcg_destroy_list_lru_range(memcg_lrus, begin, i);
 	return -ENOMEM;
 }
 
-static int memcg_init_list_lru_node(struct list_lru_node *nlru)
+static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	struct list_lru_memcg *memcg_lrus;
 	int size = memcg_nr_cache_ids;
 
+	lru->memcg_aware = memcg_aware;
+	if (!memcg_aware)
+		return 0;
+
 	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
-			      size * sizeof(void *), GFP_KERNEL);
+			      size * sizeof(memcg_lrus->lrus[0]), GFP_KERNEL);
 	if (!memcg_lrus)
 		return -ENOMEM;
 
-	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
+	if (memcg_init_list_lru_range(memcg_lrus, 0, size)) {
 		kvfree(memcg_lrus);
 		return -ENOMEM;
 	}
-	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
+	RCU_INIT_POINTER(lru->memcg_lrus, memcg_lrus);
 
 	return 0;
 }
 
-static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
+static void memcg_destroy_list_lru(struct list_lru *lru)
 {
 	struct list_lru_memcg *memcg_lrus;
+
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	/*
 	 * This is called when shrinker has already been unregistered,
 	 * and nobody can use it. So, there is no need to use kvfree_rcu().
 	 */
-	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
-	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
+	memcg_lrus = rcu_dereference_protected(lru->memcg_lrus, true);
+	memcg_destroy_list_lru_range(memcg_lrus, 0, memcg_nr_cache_ids);
 	kvfree(memcg_lrus);
 }
 
-static int memcg_update_list_lru_node(struct list_lru_node *nlru,
-				      int old_size, int new_size)
+static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size)
 {
 	struct list_lru_memcg *old, *new;
 
 	BUG_ON(old_size > new_size);
 
-	old = rcu_dereference_protected(nlru->memcg_lrus,
+	old = rcu_dereference_protected(lru->memcg_lrus,
 					lockdep_is_held(&list_lrus_mutex));
-	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
+	new = kvmalloc(sizeof(*new) + new_size * sizeof(new->lrus[0]), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
-	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
+	if (memcg_init_list_lru_range(new, old_size, new_size)) {
 		kvfree(new);
 		return -ENOMEM;
 	}
 
-	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
-	rcu_assign_pointer(nlru->memcg_lrus, new);
+	memcpy(&new->lrus, &old->lrus, old_size * sizeof(new->lrus[0]));
+	rcu_assign_pointer(lru->memcg_lrus, new);
 	kvfree_rcu(old, rcu);
 	return 0;
 }
 
-static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
-					      int old_size, int new_size)
-{
-	struct list_lru_memcg *memcg_lrus;
-
-	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
-					       lockdep_is_held(&list_lrus_mutex));
-	/* do not bother shrinking the array back to the old size, because we
-	 * cannot handle allocation failures here */
-	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
-}
-
-static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
-{
-	int i;
-
-	lru->memcg_aware = memcg_aware;
-
-	if (!memcg_aware)
-		return 0;
-
-	for_each_node(i) {
-		if (memcg_init_list_lru_node(&lru->node[i]))
-			goto fail;
-	}
-	return 0;
-fail:
-	for (i = i - 1; i >= 0; i--) {
-		if (!lru->node[i].memcg_lrus)
-			continue;
-		memcg_destroy_list_lru_node(&lru->node[i]);
-	}
-	return -ENOMEM;
-}
-
-static void memcg_destroy_list_lru(struct list_lru *lru)
-{
-	int i;
-
-	if (!list_lru_memcg_aware(lru))
-		return;
-
-	for_each_node(i)
-		memcg_destroy_list_lru_node(&lru->node[i]);
-}
-
-static int memcg_update_list_lru(struct list_lru *lru,
-				 int old_size, int new_size)
-{
-	int i;
-
-	for_each_node(i) {
-		if (memcg_update_list_lru_node(&lru->node[i],
-					       old_size, new_size))
-			goto fail;
-	}
-	return 0;
-fail:
-	for (i = i - 1; i >= 0; i--) {
-		if (!lru->node[i].memcg_lrus)
-			continue;
-
-		memcg_cancel_update_list_lru_node(&lru->node[i],
-						  old_size, new_size);
-	}
-	return -ENOMEM;
-}
-
 static void memcg_cancel_update_list_lru(struct list_lru *lru,
 					 int old_size, int new_size)
 {
-	int i;
+	struct list_lru_memcg *memcg_lrus;
 
-	for_each_node(i)
-		memcg_cancel_update_list_lru_node(&lru->node[i],
-						  old_size, new_size);
+	memcg_lrus = rcu_dereference_protected(lru->memcg_lrus,
+					       lockdep_is_held(&list_lrus_mutex));
+	/*
+	 * Do not bother shrinking the array back to the old size, because we
+	 * cannot handle allocation failures here.
+	 */
+	memcg_destroy_list_lru_range(memcg_lrus, old_size, new_size);
 }
 
 int memcg_update_all_list_lrus(int new_size)
@@ -525,8 +472,8 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
 	 */
 	spin_lock_irq(&nlru->lock);
 
-	src = list_lru_from_memcg_idx(nlru, src_idx);
-	dst = list_lru_from_memcg_idx(nlru, dst_idx);
+	src = list_lru_from_memcg_idx(lru, nid, src_idx);
+	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
 
 	list_splice_init(&src->list, &dst->list);
 
-- 
2.11.0


Thread overview: 81+ messages
2021-09-14  7:28 [PATCH v3 00/76] Optimize list lru memory consumption Muchun Song
2021-09-14  7:28 ` [PATCH v3 01/76] mm: list_lru: fix the return value of list_lru_count_one() Muchun Song
2021-09-14  7:28 ` [PATCH v3 02/76] mm: memcontrol: remove kmemcg_id reparenting Muchun Song
2021-09-14  7:28 ` [PATCH v3 03/76] mm: memcontrol: remove the kmem states Muchun Song
2021-09-14  7:28 ` [PATCH v3 04/76] mm: memcontrol: move memcg_online_kmem() to mem_cgroup_css_online() Muchun Song
2021-09-14  7:28 ` [PATCH v3 05/76] mm: list_lru: remove holding lru lock Muchun Song
2021-09-14  7:28 ` [PATCH v3 06/76] mm: list_lru: only add memcg-aware lrus to the global lru list Muchun Song
2021-09-14  7:28 ` Muchun Song [this message]
2021-09-14  7:28 ` [PATCH v3 08/76] mm: introduce kmem_cache_alloc_lru Muchun Song
2021-09-14  7:28 ` [PATCH v3 09/76] fs: introduce alloc_inode_sb() to allocate filesystems specific inode Muchun Song
2021-09-14  7:28 ` [PATCH v3 10/76] dax: allocate inode by using alloc_inode_sb() Muchun Song
2021-09-14  7:28 ` [PATCH v3 11/76] 9p: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 12/76] adfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 13/76] affs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 14/76] afs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 15/76] befs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 16/76] bfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 17/76] block: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 18/76] btrfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 19/76] ceph: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 20/76] cifs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 21/76] coda: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 22/76] ecryptfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 23/76] efs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 24/76] erofs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 25/76] exfat: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 26/76] ext2: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 27/76] ext4: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 28/76] fat: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 29/76] freevxfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 30/76] fuse: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 31/76] gfs2: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 32/76] hfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 33/76] hfsplus: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 34/76] hostfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 35/76] hpfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 36/76] hugetlbfs: " Muchun Song
2021-09-14  7:28 ` [PATCH v3 37/76] isofs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 38/76] jffs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 39/76] jfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 40/76] minix: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 41/76] nfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 42/76] nilfs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 43/76] ntfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 44/76] ocfs2: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 45/76] openpromfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 46/76] orangefs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 47/76] overlayfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 48/76] proc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 49/76] qnx4: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 50/76] qnx6: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 51/76] reiserfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 52/76] romfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 53/76] squashfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 54/76] sysv: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 55/76] ubifs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 56/76] udf: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 57/76] ufs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 58/76] vboxsf: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 59/76] xfs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 60/76] zonefs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 61/76] ipc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 62/76] shmem: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 63/76] net: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 64/76] rpc: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 65/76] f2fs: " Muchun Song
2021-09-14  7:29 ` [PATCH v3 66/76] nfs42: use a specific kmem_cache to allocate nfs4_xattr_entry Muchun Song
2021-09-14  7:29 ` [PATCH v3 67/76] mm: dcache: use kmem_cache_alloc_lru() to allocate dentry Muchun Song
2021-09-14  7:29 ` [PATCH v3 68/76] xarray: use kmem_cache_alloc_lru to allocate xa_node Muchun Song
2021-09-14  7:29 ` [PATCH v3 69/76] mm: workingset: use xas_set_lru() to pass shadow_nodes Muchun Song
2021-09-14  7:29 ` [PATCH v3 70/76] mm: list_lru: allocate list_lru_one only when needed Muchun Song
2021-09-14  7:29 ` [PATCH v3 71/76] mm: list_lru: rename memcg_drain_all_list_lrus to memcg_reparent_list_lrus Muchun Song
2021-09-14  7:29 ` [PATCH v3 72/76] mm: list_lru: replace linear array with xarray Muchun Song
2021-09-14  7:29 ` [PATCH v3 73/76] mm: memcontrol: reuse memory cgroup ID for kmem ID Muchun Song
2021-09-14  7:29 ` [PATCH v3 74/76] mm: memcontrol: fix cannot alloc the maximum memcg ID Muchun Song
2021-09-14  7:29 ` [PATCH v3 75/76] mm: list_lru: rename list_lru_per_memcg to list_lru_memcg Muchun Song
2021-09-14  7:29 ` [PATCH v3 76/76] mm: memcontrol: rename memcg_cache_id to memcg_kmem_id Muchun Song
2021-09-14 20:22 ` [PATCH v3 00/76] Optimize list lru memory consumption Theodore Ts'o
2021-09-15  7:30   ` Muchun Song
2021-09-18  6:56 ` Kari Argillander
2021-09-18  7:59   ` Muchun Song
