From: Muchun Song <songmuchun@bytedance.com>
To: willy@infradead.org, akpm@linux-foundation.org,
	hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com,
	shakeelb@google.com, guro@fb.com, shy828301@gmail.com,
	alexs@kernel.org, richard.weiyang@gmail.com, david@fromorbit.com,
	trond.myklebust@hammerspace.com, anna.schumaker@netapp.com
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-nfs@vger.kernel.org,
	zhengqi.arch@bytedance.com, duanxiongchun@bytedance.com,
	fam.zheng@bytedance.com, smuchun@gmail.com,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v3 08/76] mm: introduce kmem_cache_alloc_lru
Date: Tue, 14 Sep 2021 15:28:30 +0800
Message-ID: <20210914072938.6440-9-songmuchun@bytedance.com>
In-Reply-To: <20210914072938.6440-1-songmuchun@bytedance.com>

We currently allocate scope for every memcg to be tracked on every
superblock instantiated in the system, regardless of whether that
superblock is even accessible to that memcg.

These huge memcg counts come from container hosts where memcgs are
confined to just a small subset of the total number of superblocks
instantiated at any given point in time.

For these systems with huge container counts, list_lru does not need
the capability of tracking every memcg on every superblock. What it
comes down to is adding the memcg to the list_lru at the first
insert. So introduce kmem_cache_alloc_lru() to allocate objects and,
on demand, their per-memcg list_lru structures. Later patches in this
series convert all inode and dentry allocations from
kmem_cache_alloc() to kmem_cache_alloc_lru().
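
A rough sketch of the intended use (illustrative only; the real
conversions follow later in this series, and inode_cachep/sb here
just stand for a filesystem's inode cache and its superblock):

	-	inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
	+	inode = kmem_cache_alloc_lru(inode_cachep, &sb->s_inode_lru,
	+				     GFP_KERNEL);

The per-memcg lists behind sb->s_inode_lru are then allocated on the
first allocation from a given memcg rather than up front for every
memcg on every superblock.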

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/list_lru.h   |   3 ++
 include/linux/memcontrol.h |  14 ++++++
 include/linux/slab.h       |   3 ++
 mm/list_lru.c              | 114 +++++++++++++++++++++++++++++++++++++++++----
 mm/memcontrol.c            |  14 ------
 mm/slab.c                  |  39 +++++++++++-----
 mm/slab.h                  |  17 ++++++-
 mm/slob.c                  |   6 +++
 mm/slub.c                  |  42 +++++++++++------
 9 files changed, 202 insertions(+), 50 deletions(-)
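
For review, the new allocation path is roughly (a sketch of the call
chain added by this patch, not verbatim code):

	kmem_cache_alloc_lru(s, lru, flags)
	  __kmem_cache_alloc_lru(s, lru, flags)
	    slab_alloc(s, lru, flags, ...)
	      slab_pre_alloc_hook(s, lru, &objcg, 1, flags)
	        memcg_slab_pre_alloc_hook(s, lru, &objcg, 1, flags)
	          list_lru_memcg_alloc(lru, memcg, flags)

list_lru_memcg_alloc() walks the memcg and all of its ancestors and
allocates any missing per-memcg lru, so a later reparenting of the
list_lru always finds one.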

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2b32dbd89214..50a3144016b4 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -56,11 +56,14 @@ struct list_lru {
 	struct list_head	list;
 	int			shrinker_id;
 	bool			memcg_aware;
+	/* protects ->memcg_lrus->lrus[i] */
+	spinlock_t		lock;
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	__rcu *memcg_lrus;
 #endif
 };
 
+int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t gfp);
 void list_lru_destroy(struct list_lru *lru);
 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 		    struct lock_class_key *key, struct shrinker *shrinker);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7267cf9d1f3d..06ee32822fd4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -520,6 +520,20 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
 	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 }
 
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+retry:
+	memcg = obj_cgroup_memcg(objcg);
+	if (unlikely(!css_tryget(&memcg->css)))
+		goto retry;
+	rcu_read_unlock();
+
+	return memcg;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6ce826d8194d..441f4e87cb34 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -135,6 +135,7 @@
 
 #include <linux/kasan.h>
 
+struct list_lru;
 struct mem_cgroup;
 /*
  * struct kmem_cache related prototypes
@@ -429,6 +430,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 __alloc_size(1)
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+			   gfp_t gfpflags) __assume_kmalloc_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
 /*
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f1c73b53af9a..eea29eb4cf48 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -339,22 +339,30 @@ static void memcg_destroy_list_lru_range(struct list_lru_memcg *memcg_lrus,
 		kfree(memcg_lrus->lrus[i]);
 }
 
+static struct list_lru_per_memcg *memcg_list_lru_alloc(gfp_t gfp)
+{
+	int nid;
+	struct list_lru_per_memcg *lru;
+
+	lru = kmalloc(struct_size(lru, nodes, nr_node_ids), gfp);
+	if (!lru)
+		return NULL;
+
+	for_each_node(nid)
+		init_one_lru(&lru->nodes[nid]);
+
+	return lru;
+}
+
 static int memcg_init_list_lru_range(struct list_lru_memcg *memcg_lrus,
 				     int begin, int end)
 {
 	int i;
 
 	for (i = begin; i < end; i++) {
-		int nid;
-		struct list_lru_per_memcg *lru;
-
-		lru = kmalloc(struct_size(lru, nodes, nr_node_ids), GFP_KERNEL);
-		if (!lru)
+		memcg_lrus->lrus[i] = memcg_list_lru_alloc(GFP_KERNEL);
+		if (!memcg_lrus->lrus[i])
 			goto fail;
-
-		for_each_node(nid)
-			init_one_lru(&lru->nodes[nid]);
-		memcg_lrus->lrus[i] = lru;
 	}
 	return 0;
 fail:
@@ -371,6 +379,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 	if (!memcg_aware)
 		return 0;
 
+	spin_lock_init(&lru->lock);
+
 	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
 			      size * sizeof(memcg_lrus->lrus[0]), GFP_KERNEL);
 	if (!memcg_lrus)
@@ -418,8 +428,11 @@ static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_siz
 		return -ENOMEM;
 	}
 
+	spin_lock_irq(&lru->lock);
 	memcpy(&new->lrus, &old->lrus, old_size * sizeof(new->lrus[0]));
 	rcu_assign_pointer(lru->memcg_lrus, new);
+	spin_unlock_irq(&lru->lock);
+
 	kvfree_rcu(old, rcu);
 	return 0;
 }
@@ -504,6 +517,89 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 		memcg_drain_list_lru(lru, src_idx, dst_memcg);
 	mutex_unlock(&list_lrus_mutex);
 }
+
+static bool memcg_list_lru_skip_alloc(struct list_lru *lru,
+				      struct mem_cgroup *memcg)
+{
+	struct list_lru_memcg *memcg_lrus;
+	int idx = memcg_cache_id(memcg);
+
+	if (unlikely(idx < 0))
+		return true;
+
+	rcu_read_lock();
+	memcg_lrus = rcu_dereference(lru->memcg_lrus);
+	if (memcg_lrus->lrus[idx]) {
+		rcu_read_unlock();
+		return true;
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t gfp)
+{
+	unsigned long flags;
+	struct list_lru_memcg *memcg_lrus;
+	int i;
+
+	struct list_lru_memcg_table {
+		struct list_lru_per_memcg *mlru;
+		struct mem_cgroup *memcg;
+	} *table;
+
+	if (!list_lru_memcg_aware(lru))
+		return 0;
+
+	if (memcg_list_lru_skip_alloc(lru, memcg))
+		return 0;
+
+	/*
+	 * The allocated list_lru_per_memcg array is not accounted directly.
+	 * Moreover, it should not come from DMA buffer and is not readily
+	 * reclaimable. So those GFP bits should be masked off.
+	 */
+	gfp &= ~(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT | __GFP_ZERO);
+	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
+	if (!table)
+		return -ENOMEM;
+
+	/*
+	 * Because the list_lru can be reparented to the parent cgroup's
+	 * list_lru, we should make sure that this cgroup and all its
+	 * ancestors have allocated list_lru_per_memcg.
+	 */
+	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
+		if (memcg_list_lru_skip_alloc(lru, memcg))
+			break;
+
+		table[i].memcg = memcg;
+		table[i].mlru = memcg_list_lru_alloc(gfp);
+		if (!table[i].mlru) {
+			while (i--)
+				kfree(table[i].mlru);
+			kfree(table);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_irqsave(&lru->lock, flags);
+	memcg_lrus = rcu_dereference_protected(lru->memcg_lrus, true);
+	while (i--) {
+		int index = memcg_cache_id(table[i].memcg);
+
+		if (memcg_lrus->lrus[index])
+			kfree(table[i].mlru);
+		else
+			memcg_lrus->lrus[index] = table[i].mlru;
+	}
+	spin_unlock_irqrestore(&lru->lock, flags);
+
+	kfree(table);
+
+	return 0;
+}
 #else
 static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a85b52968666..0e8c8d8465e5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2763,20 +2763,6 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	folio->memcg_data = (unsigned long)memcg;
 }
 
-static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
-{
-	struct mem_cgroup *memcg;
-
-	rcu_read_lock();
-retry:
-	memcg = obj_cgroup_memcg(objcg);
-	if (unlikely(!css_tryget(&memcg->css)))
-		goto retry;
-	rcu_read_unlock();
-
-	return memcg;
-}
-
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * The allocated objcg pointers array is not accounted directly.
diff --git a/mm/slab.c b/mm/slab.c
index d0f725637663..9a001aabc77b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3219,7 +3219,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	bool init = false;
 
 	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
+	cachep = slab_pre_alloc_hook(cachep, NULL, &objcg, 1, flags);
 	if (unlikely(!cachep))
 		return NULL;
 
@@ -3295,7 +3295,8 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
+slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
+	   size_t orig_size, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3303,7 +3304,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
 	bool init = false;
 
 	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
+	cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags);
 	if (unlikely(!cachep))
 		return NULL;
 
@@ -3492,6 +3493,18 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	__free_one(ac, objp);
 }
 
+static __always_inline
+void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+			     gfp_t flags)
+{
+	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
+
+	trace_kmem_cache_alloc(_RET_IP_, ret,
+			       cachep->object_size, cachep->size, flags);
+
+	return ret;
+}
+
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
@@ -3504,15 +3517,17 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
-
-	trace_kmem_cache_alloc(_RET_IP_, ret,
-			       cachep->object_size, cachep->size, flags);
-
-	return ret;
+	return __kmem_cache_alloc_lru(cachep, NULL, flags);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+			   gfp_t flags)
+{
+	return __kmem_cache_alloc_lru(cachep, lru, flags);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_lru);
+
 static __always_inline void
 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 				  size_t size, void **p, unsigned long caller)
@@ -3529,7 +3544,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	size_t i;
 	struct obj_cgroup *objcg = NULL;
 
-	s = slab_pre_alloc_hook(s, &objcg, size, flags);
+	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (!s)
 		return 0;
 
@@ -3570,7 +3585,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	void *ret;
 
-	ret = slab_alloc(cachep, flags, size, _RET_IP_);
+	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
@@ -3697,7 +3712,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = slab_alloc(cachep, flags, size, caller);
+	ret = slab_alloc(cachep, NULL, flags, size, caller);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
diff --git a/mm/slab.h b/mm/slab.h
index 58c01a34e5b8..c6fbfda824df 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -46,6 +46,7 @@ struct kmem_cache {
 #include <linux/kmemleak.h>
 #include <linux/random.h>
 #include <linux/sched/mm.h>
+#include <linux/list_lru.h>
 
 /*
  * State of the slab allocator.
@@ -269,6 +270,7 @@ static inline size_t obj_full_size(struct kmem_cache *s)
  * Returns false if the allocation should fail.
  */
 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct list_lru *lru,
 					     struct obj_cgroup **objcgp,
 					     size_t objects, gfp_t flags)
 {
@@ -284,6 +286,17 @@ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (!objcg)
 		return true;
 
+	if (lru) {
+		struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);
+
+		if (list_lru_memcg_alloc(lru, memcg, flags)) {
+			css_put(&memcg->css);
+			obj_cgroup_put(objcg);
+			return false;
+		}
+		css_put(&memcg->css);
+	}
+
 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
 		obj_cgroup_put(objcg);
 		return false;
@@ -386,6 +399,7 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
 }
 
 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct list_lru *lru,
 					     struct obj_cgroup **objcgp,
 					     size_t objects, gfp_t flags)
 {
@@ -484,6 +498,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+						     struct list_lru *lru,
 						     struct obj_cgroup **objcgp,
 						     size_t size, gfp_t flags)
 {
@@ -494,7 +509,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	if (should_failslab(s, flags))
 		return NULL;
 
-	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
 		return NULL;
 
 	return s;
diff --git a/mm/slob.c b/mm/slob.c
index 74d3f6e60666..9db272c75928 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -633,6 +633,12 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_lru);
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
diff --git a/mm/slub.c b/mm/slub.c
index df1ac8aff86f..41211a2de0da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3083,7 +3083,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
 	void *object;
@@ -3093,7 +3093,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
 
-	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
+	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
 	if (!s)
 		return NULL;
 
@@ -3184,27 +3184,41 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	return object;
 }
 
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
-	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
+	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+static __always_inline
+void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+			     gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
+	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
 				s->size, gfpflags);
 
 	return ret;
 }
+
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return __kmem_cache_alloc_lru(s, NULL, gfpflags);
+}
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+			   gfp_t gfpflags)
+{
+	return __kmem_cache_alloc_lru(s, lru, gfpflags);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_lru);
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
+	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -3215,7 +3229,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
+	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -3229,7 +3243,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
+	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -3611,7 +3625,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	struct obj_cgroup *objcg = NULL;
 
 	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, &objcg, size, flags);
+	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (unlikely(!s))
 		return false;
 	/*
@@ -4360,7 +4374,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, _RET_IP_, size);
+	ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -4408,7 +4422,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
+	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -4878,7 +4892,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, caller, size);
+	ret = slab_alloc(s, NULL, gfpflags, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4909,7 +4923,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, gfpflags, node, caller, size);
+	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
-- 
2.11.0

