From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, cl@linux.com, guro@fb.com,
	hannes@cmpxchg.org, linux-mm@kvack.org, mhocko@kernel.org,
	mm-commits@vger.kernel.org, naresh.kamboju@linaro.org,
	shakeelb@google.com, tj@kernel.org,
	torvalds@linux-foundation.org, vbabka@suse.cz
Subject: [patch 078/163] mm: memcg/slab: use a single set of kmem_caches for all allocations
Date: Thu, 06 Aug 2020 23:21:27 -0700
Message-ID: <20200807062127.V3pcUyOtA%akpm@linux-foundation.org>
In-Reply-To: <20200806231643.a2711a608dd0f18bff2caf2b@linux-foundation.org>

From: Roman Gushchin <guro@fb.com>
Subject: mm: memcg/slab: use a single set of kmem_caches for all allocations

Instead of having two sets of kmem_caches, one for system-wide and
non-accounted allocations and a second shared by all accounted
allocations, we can use just one.

The idea is simple: space for obj_cgroup metadata can be allocated on
demand and filled only for accounted allocations.

This allows removing a lot of code that was required to handle kmem_cache
clones for accounted allocations.  There is no longer any need to create
them, accumulate statistics, propagate attributes, etc.  It is quite a
significant simplification.

Also, because the total number of slab_caches is reduced by almost half
(not all kmem_caches have a memcg clone), some additional memory savings
are expected.  On my devvm it saves an additional ~3.5% of slab memory.
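
To illustrate the idea outside of the kernel, here is a minimal userspace
C sketch of an on-demand per-page ownership vector.  The names (toy_page,
toy_objcg, toy_account) are invented for this illustration and are not
kernel APIs; the real implementation is memcg_alloc_page_obj_cgroups()
and memcg_slab_post_alloc_hook() in the patch below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJS_PER_PAGE 8

struct toy_objcg { const char *name; };

struct toy_page {
	void *objects[OBJS_PER_PAGE];
	/* Allocated lazily, only once the page hosts an accounted object. */
	struct toy_objcg **obj_cgroups;
};

static int toy_alloc_obj_cgroups(struct toy_page *page)
{
	if (page->obj_cgroups)
		return 0;	/* vector already attached */
	page->obj_cgroups = calloc(OBJS_PER_PAGE, sizeof(*page->obj_cgroups));
	return page->obj_cgroups ? 0 : -1;
}

static int toy_account(struct toy_page *page, unsigned int idx,
		       struct toy_objcg *objcg)
{
	/* Accounted allocation: attach the vector on demand, record owner. */
	if (toy_alloc_obj_cgroups(page))
		return -1;
	page->obj_cgroups[idx] = objcg;
	return 0;
}

int main(void)
{
	struct toy_page page;
	struct toy_objcg cg = { .name = "group-A" };

	memset(&page, 0, sizeof(page));

	/* Unaccounted allocations leave obj_cgroups NULL: no extra memory. */
	printf("vector before accounting: %p\n", (void *)page.obj_cgroups);

	/* The first accounted allocation on this page pays for the vector. */
	if (toy_account(&page, 3, &cg))
		return 1;
	printf("object 3 charged to %s\n", page.obj_cgroups[3]->name);

	free(page.obj_cgroups);
	return 0;
}

The kernel version additionally tags the vector pointer's low bit and
installs it with cmpxchg(), so concurrent allocators attach it exactly
once, as can be seen in memcg_alloc_page_obj_cgroups() below.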

[guro@fb.com: fix build on MIPS]
  Link: http://lkml.kernel.org/r/20200717214810.3733082-1-guro@fb.com
Link: http://lkml.kernel.org/r/20200623174037.3951353-18-guro@fb.com
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/slab.h     |    2 
 include/linux/slab_def.h |    3 
 include/linux/slub_def.h |   10 -
 mm/memcontrol.c          |   25 +++-
 mm/slab.c                |   41 ------
 mm/slab.h                |  196 ++++++-------------------------
 mm/slab_common.c         |  230 -------------------------------------
 mm/slub.c                |  163 --------------------------
 8 files changed, 79 insertions(+), 591 deletions(-)

--- a/include/linux/slab_def.h~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/include/linux/slab_def.h
@@ -72,9 +72,6 @@ struct kmem_cache {
 	int obj_offset;
 #endif /* CONFIG_DEBUG_SLAB */
 
-#ifdef CONFIG_MEMCG
-	struct memcg_cache_params memcg_params;
-#endif
 #ifdef CONFIG_KASAN
 	struct kasan_cache kasan_info;
 #endif
--- a/include/linux/slab.h~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/include/linux/slab.h
@@ -155,8 +155,6 @@ struct kmem_cache *kmem_cache_create_use
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 
-void memcg_create_kmem_cache(struct kmem_cache *cachep);
-
 /*
  * Please use this macro to create slab caches. Simply specify the
  * name of the structure and maybe some flags that are listed above.
--- a/include/linux/slub_def.h~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/include/linux/slub_def.h
@@ -108,17 +108,7 @@ struct kmem_cache {
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
-	struct work_struct kobj_remove_work;
 #endif
-#ifdef CONFIG_MEMCG
-	struct memcg_cache_params memcg_params;
-	/* For propagation, maximum size of a stored attr */
-	unsigned int max_attr_size;
-#ifdef CONFIG_SYSFS
-	struct kset *memcg_kset;
-#endif
-#endif
-
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	unsigned long random;
 #endif
--- a/mm/memcontrol.c~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/mm/memcontrol.c
@@ -2800,6 +2800,26 @@ static void commit_charge(struct page *p
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
+				 gfp_t gfp)
+{
+	unsigned int objects = objs_per_slab_page(s, page);
+	void *vec;
+
+	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
+			   page_to_nid(page));
+	if (!vec)
+		return -ENOMEM;
+
+	if (cmpxchg(&page->obj_cgroups, NULL,
+		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
+		kfree(vec);
+	else
+		kmemleak_not_leak(vec);
+
+	return 0;
+}
+
 /*
  * Returns a pointer to the memory cgroup to which the kernel object is charged.
  *
@@ -2826,7 +2846,10 @@ struct mem_cgroup *mem_cgroup_from_obj(v
 
 		off = obj_to_index(page->slab_cache, page, p);
 		objcg = page_obj_cgroups(page)[off];
-		return obj_cgroup_memcg(objcg);
+		if (objcg)
+			return obj_cgroup_memcg(objcg);
+
+		return NULL;
 	}
 
 	/* All other pages use page->mem_cgroup */
--- a/mm/slab.c~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/mm/slab.c
@@ -1379,11 +1379,7 @@ static struct page *kmem_getpages(struct
 		return NULL;
 	}
 
-	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
-		__free_pages(page, cachep->gfporder);
-		return NULL;
-	}
-
+	charge_slab_page(page, flags, cachep->gfporder, cachep);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -3799,8 +3795,8 @@ fail:
 }
 
 /* Always called with the slab_mutex held */
-static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared, gfp_t gfp)
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+			    int batchcount, int shared, gfp_t gfp)
 {
 	struct array_cache __percpu *cpu_cache, *prev;
 	int cpu;
@@ -3845,30 +3841,6 @@ setup_node:
 	return setup_kmem_cache_nodes(cachep, gfp);
 }
 
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared, gfp_t gfp)
-{
-	int ret;
-	struct kmem_cache *c;
-
-	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
-
-	if (slab_state < FULL)
-		return ret;
-
-	if ((ret < 0) || !is_root_cache(cachep))
-		return ret;
-
-	lockdep_assert_held(&slab_mutex);
-	c = memcg_cache(cachep);
-	if (c) {
-		/* return value determined by the root cache only */
-		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
-	}
-
-	return ret;
-}
-
 /* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
@@ -3881,13 +3853,6 @@ static int enable_cpucache(struct kmem_c
 	if (err)
 		goto end;
 
-	if (!is_root_cache(cachep)) {
-		struct kmem_cache *root = memcg_root_cache(cachep);
-		limit = root->limit;
-		shared = root->shared;
-		batchcount = root->batchcount;
-	}
-
 	if (limit && shared && batchcount)
 		goto skip_setup;
 	/*
--- a/mm/slab_common.c~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/mm/slab_common.c
@@ -130,36 +130,6 @@ int __kmem_cache_alloc_bulk(struct kmem_
 	return i;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-static void memcg_kmem_cache_create_func(struct work_struct *work)
-{
-	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
-						 memcg_params.work);
-	memcg_create_kmem_cache(cachep);
-}
-
-void slab_init_memcg_params(struct kmem_cache *s)
-{
-	s->memcg_params.root_cache = NULL;
-	s->memcg_params.memcg_cache = NULL;
-	INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
-}
-
-static void init_memcg_params(struct kmem_cache *s,
-			      struct kmem_cache *root_cache)
-{
-	if (root_cache)
-		s->memcg_params.root_cache = root_cache;
-	else
-		slab_init_memcg_params(s);
-}
-#else
-static inline void init_memcg_params(struct kmem_cache *s,
-				     struct kmem_cache *root_cache)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
@@ -197,9 +167,6 @@ int slab_unmergeable(struct kmem_cache *
 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
 		return 1;
 
-	if (!is_root_cache(s))
-		return 1;
-
 	if (s->ctor)
 		return 1;
 
@@ -286,7 +253,6 @@ static struct kmem_cache *create_cache(c
 	s->useroffset = useroffset;
 	s->usersize = usersize;
 
-	init_memcg_params(s, root_cache);
 	err = __kmem_cache_create(s, flags);
 	if (err)
 		goto out_free_cache;
@@ -344,7 +310,6 @@ kmem_cache_create_usercopy(const char *n
 
 	get_online_cpus();
 	get_online_mems();
-	memcg_get_cache_ids();
 
 	mutex_lock(&slab_mutex);
 
@@ -394,7 +359,6 @@ kmem_cache_create_usercopy(const char *n
 out_unlock:
 	mutex_unlock(&slab_mutex);
 
-	memcg_put_cache_ids();
 	put_online_mems();
 	put_online_cpus();
 
@@ -507,87 +471,6 @@ static int shutdown_cache(struct kmem_ca
 	return 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-/*
- * memcg_create_kmem_cache - Create a cache for non-root memory cgroups.
- * @root_cache: The parent of the new cache.
- *
- * This function attempts to create a kmem cache that will serve allocation
- * requests going all non-root memory cgroups to @root_cache. The new cache
- * inherits properties from its parent.
- */
-void memcg_create_kmem_cache(struct kmem_cache *root_cache)
-{
-	struct kmem_cache *s = NULL;
-	char *cache_name;
-
-	get_online_cpus();
-	get_online_mems();
-
-	mutex_lock(&slab_mutex);
-
-	if (root_cache->memcg_params.memcg_cache)
-		goto out_unlock;
-
-	cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name);
-	if (!cache_name)
-		goto out_unlock;
-
-	s = create_cache(cache_name, root_cache->object_size,
-			 root_cache->align,
-			 root_cache->flags & CACHE_CREATE_MASK,
-			 root_cache->useroffset, root_cache->usersize,
-			 root_cache->ctor, root_cache);
-	/*
-	 * If we could not create a memcg cache, do not complain, because
-	 * that's not critical at all as we can always proceed with the root
-	 * cache.
-	 */
-	if (IS_ERR(s)) {
-		kfree(cache_name);
-		goto out_unlock;
-	}
-
-	/*
-	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
-	 * barrier here to ensure nobody will see the kmem_cache partially
-	 * initialized.
-	 */
-	smp_wmb();
-	root_cache->memcg_params.memcg_cache = s;
-
-out_unlock:
-	mutex_unlock(&slab_mutex);
-
-	put_online_mems();
-	put_online_cpus();
-}
-
-static int shutdown_memcg_caches(struct kmem_cache *s)
-{
-	BUG_ON(!is_root_cache(s));
-
-	if (s->memcg_params.memcg_cache)
-		WARN_ON(shutdown_cache(s->memcg_params.memcg_cache));
-
-	return 0;
-}
-
-static void cancel_memcg_cache_creation(struct kmem_cache *s)
-{
-	cancel_work_sync(&s->memcg_params.work);
-}
-#else
-static inline int shutdown_memcg_caches(struct kmem_cache *s)
-{
-	return 0;
-}
-
-static inline void cancel_memcg_cache_creation(struct kmem_cache *s)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
 	__kmem_cache_release(s);
@@ -602,8 +485,6 @@ void kmem_cache_destroy(struct kmem_cach
 	if (unlikely(!s))
 		return;
 
-	cancel_memcg_cache_creation(s);
-
 	get_online_cpus();
 	get_online_mems();
 
@@ -613,10 +494,7 @@ void kmem_cache_destroy(struct kmem_cach
 	if (s->refcount)
 		goto out_unlock;
 
-	err = shutdown_memcg_caches(s);
-	if (!err)
-		err = shutdown_cache(s);
-
+	err = shutdown_cache(s);
 	if (err) {
 		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
 		       s->name);
@@ -653,33 +531,6 @@ int kmem_cache_shrink(struct kmem_cache
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_shrink_all - shrink root and memcg caches
- * @s: The cache pointer
- */
-void kmem_cache_shrink_all(struct kmem_cache *s)
-{
-	struct kmem_cache *c;
-
-	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
-		kmem_cache_shrink(s);
-		return;
-	}
-
-	get_online_cpus();
-	get_online_mems();
-	kasan_cache_shrink(s);
-	__kmem_cache_shrink(s);
-
-	c = memcg_cache(s);
-	if (c) {
-		kasan_cache_shrink(c);
-		__kmem_cache_shrink(c);
-	}
-	put_online_mems();
-	put_online_cpus();
-}
-
 bool slab_is_available(void)
 {
 	return slab_state >= UP;
@@ -708,8 +559,6 @@ void __init create_boot_cache(struct kme
 	s->useroffset = useroffset;
 	s->usersize = usersize;
 
-	slab_init_memcg_params(s);
-
 	err = __kmem_cache_create(s, flags);
 
 	if (err)
@@ -1098,25 +947,6 @@ void slab_stop(struct seq_file *m, void
 	mutex_unlock(&slab_mutex);
 }
 
-static void
-memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
-{
-	struct kmem_cache *c;
-	struct slabinfo sinfo;
-
-	c = memcg_cache(s);
-	if (c) {
-		memset(&sinfo, 0, sizeof(sinfo));
-		get_slabinfo(c, &sinfo);
-
-		info->active_slabs += sinfo.active_slabs;
-		info->num_slabs += sinfo.num_slabs;
-		info->shared_avail += sinfo.shared_avail;
-		info->active_objs += sinfo.active_objs;
-		info->num_objs += sinfo.num_objs;
-	}
-}
-
 static void cache_show(struct kmem_cache *s, struct seq_file *m)
 {
 	struct slabinfo sinfo;
@@ -1124,10 +954,8 @@ static void cache_show(struct kmem_cache
 	memset(&sinfo, 0, sizeof(sinfo));
 	get_slabinfo(s, &sinfo);
 
-	memcg_accumulate_slabinfo(s, &sinfo);
-
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
+		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
 
 	seq_printf(m, " : tunables %4u %4u %4u",
@@ -1144,8 +972,7 @@ static int slab_show(struct seq_file *m,
 
 	if (p == slab_caches.next)
 		print_slabinfo_header(m);
-	if (is_root_cache(s))
-		cache_show(s, m);
+	cache_show(s, m);
 	return 0;
 }
 
@@ -1170,13 +997,13 @@ void dump_unreclaimable_slab(void)
 	pr_info("Name                      Used          Total\n");
 
 	list_for_each_entry_safe(s, s2, &slab_caches, list) {
-		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
+		if (s->flags & SLAB_RECLAIM_ACCOUNT)
 			continue;
 
 		get_slabinfo(s, &sinfo);
 
 		if (sinfo.num_objs > 0)
-			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
+			pr_info("%-17s %10luKB %10luKB\n", s->name,
 				(sinfo.active_objs * s->size) / 1024,
 				(sinfo.num_objs * s->size) / 1024);
 	}
@@ -1235,53 +1062,6 @@ static int __init slab_proc_init(void)
 }
 module_init(slab_proc_init);
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
-/*
- * Display information about kmem caches that have memcg cache.
- */
-static int memcg_slabinfo_show(struct seq_file *m, void *unused)
-{
-	struct kmem_cache *s, *c;
-	struct slabinfo sinfo;
-
-	mutex_lock(&slab_mutex);
-	seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
-	seq_puts(m, " <active_slabs> <num_slabs>\n");
-	list_for_each_entry(s, &slab_caches, list) {
-		/*
-		 * Skip kmem caches that don't have the memcg cache.
-		 */
-		if (!s->memcg_params.memcg_cache)
-			continue;
-
-		memset(&sinfo, 0, sizeof(sinfo));
-		get_slabinfo(s, &sinfo);
-		seq_printf(m, "%-17s root       %6lu %6lu %6lu %6lu\n",
-			   cache_name(s), sinfo.active_objs, sinfo.num_objs,
-			   sinfo.active_slabs, sinfo.num_slabs);
-
-		c = s->memcg_params.memcg_cache;
-		memset(&sinfo, 0, sizeof(sinfo));
-		get_slabinfo(c, &sinfo);
-		seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n",
-			   cache_name(c), root_mem_cgroup->css.id,
-			   sinfo.active_objs, sinfo.num_objs,
-			   sinfo.active_slabs, sinfo.num_slabs);
-	}
-	mutex_unlock(&slab_mutex);
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
-
-static int __init memcg_slabinfo_init(void)
-{
-	debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
-			    NULL, NULL, &memcg_slabinfo_fops);
-	return 0;
-}
-
-late_initcall(memcg_slabinfo_init);
-#endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */
 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
 
 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
--- a/mm/slab.h~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/mm/slab.h
@@ -30,28 +30,6 @@ struct kmem_cache {
 	struct list_head list;	/* List of all slab caches on the system */
 };
 
-#else /* !CONFIG_SLOB */
-
-/*
- * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child cache will have it. Some fields are used
- * in both cases, other are specific to root caches.
- *
- * @root_cache:	Common to root and child caches.  NULL for root, pointer to
- *		the root cache for children.
- *
- * The following fields are specific to root caches.
- *
- * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
- *		cgroups.
- * @work: work struct used to create the non-root cache.
- */
-struct memcg_cache_params {
-	struct kmem_cache *root_cache;
-
-	struct kmem_cache *memcg_cache;
-	struct work_struct work;
-};
 #endif /* CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
@@ -196,7 +174,6 @@ int __kmem_cache_shutdown(struct kmem_ca
 void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *);
 void slab_kmem_cache_release(struct kmem_cache *);
-void kmem_cache_shrink_all(struct kmem_cache *s);
 
 struct seq_file;
 struct file;
@@ -263,43 +240,6 @@ static inline bool kmem_cache_debug_flag
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-static inline bool is_root_cache(struct kmem_cache *s)
-{
-	return !s->memcg_params.root_cache;
-}
-
-static inline bool slab_equal_or_root(struct kmem_cache *s,
-				      struct kmem_cache *p)
-{
-	return p == s || p == s->memcg_params.root_cache;
-}
-
-/*
- * We use suffixes to the name in memcg because we can't have caches
- * created in the system with the same name. But when we print them
- * locally, better refer to them with the base name
- */
-static inline const char *cache_name(struct kmem_cache *s)
-{
-	if (!is_root_cache(s))
-		s = s->memcg_params.root_cache;
-	return s->name;
-}
-
-static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		return s;
-	return s->memcg_params.root_cache;
-}
-
-static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		return s->memcg_params.memcg_cache;
-	return NULL;
-}
-
 static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
 {
 	/*
@@ -317,21 +257,8 @@ static inline bool page_has_obj_cgroups(
 	return ((unsigned long)page->obj_cgroups & 0x1UL);
 }
 
-static inline int memcg_alloc_page_obj_cgroups(struct page *page,
-					       struct kmem_cache *s, gfp_t gfp)
-{
-	unsigned int objects = objs_per_slab_page(s, page);
-	void *vec;
-
-	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
-			   page_to_nid(page));
-	if (!vec)
-		return -ENOMEM;
-
-	kmemleak_not_leak(vec);
-	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
-	return 0;
-}
+int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
+				 gfp_t gfp);
 
 static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
@@ -348,38 +275,25 @@ static inline size_t obj_full_size(struc
 	return s->size + sizeof(struct obj_cgroup *);
 }
 
-static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-						struct obj_cgroup **objcgp,
-						size_t objects, gfp_t flags)
+static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+							   size_t objects,
+							   gfp_t flags)
 {
-	struct kmem_cache *cachep;
 	struct obj_cgroup *objcg;
 
 	if (memcg_kmem_bypass())
-		return s;
-
-	cachep = READ_ONCE(s->memcg_params.memcg_cache);
-	if (unlikely(!cachep)) {
-		/*
-		 * If memcg cache does not exist yet, we schedule it's
-		 * asynchronous creation and let the current allocation
-		 * go through with the root cache.
-		 */
-		queue_work(system_wq, &s->memcg_params.work);
-		return s;
-	}
+		return NULL;
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
-		return s;
+		return NULL;
 
 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
 		obj_cgroup_put(objcg);
-		cachep = NULL;
+		return NULL;
 	}
 
-	*objcgp = objcg;
-	return cachep;
+	return objcg;
 }
 
 static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -398,15 +312,27 @@ static inline void mod_objcg_state(struc
 
 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 					      struct obj_cgroup *objcg,
-					      size_t size, void **p)
+					      gfp_t flags, size_t size,
+					      void **p)
 {
 	struct page *page;
 	unsigned long off;
 	size_t i;
 
+	if (!objcg)
+		return;
+
+	flags &= ~__GFP_ACCOUNT;
 	for (i = 0; i < size; i++) {
 		if (likely(p[i])) {
 			page = virt_to_head_page(p[i]);
+
+			if (!page_has_obj_cgroups(page) &&
+			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
+				obj_cgroup_uncharge(objcg, obj_full_size(s));
+				continue;
+			}
+
 			off = obj_to_index(s, page, p[i]);
 			obj_cgroup_get(objcg);
 			page_obj_cgroups(page)[off] = objcg;
@@ -425,13 +351,19 @@ static inline void memcg_slab_free_hook(
 	struct obj_cgroup *objcg;
 	unsigned int off;
 
-	if (!memcg_kmem_enabled() || is_root_cache(s))
+	if (!memcg_kmem_enabled())
+		return;
+
+	if (!page_has_obj_cgroups(page))
 		return;
 
 	off = obj_to_index(s, page, p);
 	objcg = page_obj_cgroups(page)[off];
 	page_obj_cgroups(page)[off] = NULL;
 
+	if (!objcg)
+		return;
+
 	obj_cgroup_uncharge(objcg, obj_full_size(s));
 	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
 			-obj_full_size(s));
@@ -439,35 +371,7 @@ static inline void memcg_slab_free_hook(
 	obj_cgroup_put(objcg);
 }
 
-extern void slab_init_memcg_params(struct kmem_cache *);
-
 #else /* CONFIG_MEMCG_KMEM */
-static inline bool is_root_cache(struct kmem_cache *s)
-{
-	return true;
-}
-
-static inline bool slab_equal_or_root(struct kmem_cache *s,
-				      struct kmem_cache *p)
-{
-	return s == p;
-}
-
-static inline const char *cache_name(struct kmem_cache *s)
-{
-	return s->name;
-}
-
-static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
-{
-	return s;
-}
-
-static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
-{
-	return NULL;
-}
-
 static inline bool page_has_obj_cgroups(struct page *page)
 {
 	return false;
@@ -488,16 +392,17 @@ static inline void memcg_free_page_obj_c
 {
 }
 
-static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-						struct obj_cgroup **objcgp,
-						size_t objects, gfp_t flags)
+static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+							   size_t objects,
+							   gfp_t flags)
 {
 	return NULL;
 }
 
 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 					      struct obj_cgroup *objcg,
-					      size_t size, void **p)
+					      gfp_t flags, size_t size,
+					      void **p)
 {
 }
 
@@ -505,11 +410,6 @@ static inline void memcg_slab_free_hook(
 					void *p)
 {
 }
-
-static inline void slab_init_memcg_params(struct kmem_cache *s)
-{
-}
-
 #endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
@@ -523,27 +423,18 @@ static inline struct kmem_cache *virt_to
 	return page->slab_cache;
 }
 
-static __always_inline int charge_slab_page(struct page *page,
-					    gfp_t gfp, int order,
-					    struct kmem_cache *s)
-{
-	if (memcg_kmem_enabled() && !is_root_cache(s)) {
-		int ret;
-
-		ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
-		if (ret)
-			return ret;
-	}
-
+static __always_inline void charge_slab_page(struct page *page,
+					     gfp_t gfp, int order,
+					     struct kmem_cache *s)
+{
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
-	return 0;
 }
 
 static __always_inline void uncharge_slab_page(struct page *page, int order,
 					       struct kmem_cache *s)
 {
-	if (memcg_kmem_enabled() && !is_root_cache(s))
+	if (memcg_kmem_enabled())
 		memcg_free_page_obj_cgroups(page);
 
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
@@ -555,12 +446,11 @@ static inline struct kmem_cache *cache_f
 	struct kmem_cache *cachep;
 
 	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
-	    !memcg_kmem_enabled() &&
 	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 		return s;
 
 	cachep = virt_to_cache(x);
-	if (WARN(cachep && !slab_equal_or_root(cachep, s),
+	if (WARN(cachep && cachep != s,
 		  "%s: Wrong slab cache. %s but object is from %s\n",
 		  __func__, s->name, cachep->name))
 		print_tracking(cachep, x);
@@ -613,7 +503,7 @@ static inline struct kmem_cache *slab_pr
 
 	if (memcg_kmem_enabled() &&
 	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);
+		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
 
 	return s;
 }
@@ -632,8 +522,8 @@ static inline void slab_post_alloc_hook(
 					 s->flags, flags);
 	}
 
-	if (memcg_kmem_enabled() && !is_root_cache(s))
-		memcg_slab_post_alloc_hook(s, objcg, size, p);
+	if (memcg_kmem_enabled())
+		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }
 
 #ifndef CONFIG_SLOB
--- a/mm/slub.c~mm-memcg-slab-use-a-single-set-of-kmem_caches-for-all-allocations
+++ a/mm/slub.c
@@ -218,14 +218,10 @@ enum track_item { TRACK_ALLOC, TRACK_FRE
 #ifdef CONFIG_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void memcg_propagate_slab_attrs(struct kmem_cache *s);
-static void sysfs_slab_remove(struct kmem_cache *s);
 #else
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
-static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 #endif
 
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -1624,10 +1620,8 @@ static inline struct page *alloc_slab_pa
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (page && charge_slab_page(page, flags, order, s)) {
-		__free_pages(page, order);
-		page = NULL;
-	}
+	if (page)
+		charge_slab_page(page, flags, order, s);
 
 	return page;
 }
@@ -3920,7 +3914,6 @@ int __kmem_cache_shutdown(struct kmem_ca
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	sysfs_slab_remove(s);
 	return 0;
 }
 
@@ -4358,7 +4351,6 @@ static struct kmem_cache * __init bootst
 			p->slab_cache = s;
 #endif
 	}
-	slab_init_memcg_params(s);
 	list_add(&s->list, &slab_caches);
 	return s;
 }
@@ -4414,7 +4406,7 @@ struct kmem_cache *
 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s, *c;
+	struct kmem_cache *s;
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
@@ -4427,12 +4419,6 @@ __kmem_cache_alias(const char *name, uns
 		s->object_size = max(s->object_size, size);
 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
 
-		c = memcg_cache(s);
-		if (c) {
-			c->object_size = s->object_size;
-			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
-		}
-
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
 			s = NULL;
@@ -4454,7 +4440,6 @@ int __kmem_cache_create(struct kmem_cach
 	if (slab_state <= UP)
 		return 0;
 
-	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
 		__kmem_cache_release(s);
@@ -5312,7 +5297,7 @@ static ssize_t shrink_store(struct kmem_
 			const char *buf, size_t length)
 {
 	if (buf[0] == '1')
-		kmem_cache_shrink_all(s);
+		kmem_cache_shrink(s);
 	else
 		return -EINVAL;
 	return length;
@@ -5536,99 +5521,9 @@ static ssize_t slab_attr_store(struct ko
 		return -EIO;
 
 	err = attribute->store(s, buf, len);
-#ifdef CONFIG_MEMCG
-	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
-		struct kmem_cache *c;
-
-		mutex_lock(&slab_mutex);
-		if (s->max_attr_size < len)
-			s->max_attr_size = len;
-
-		/*
-		 * This is a best effort propagation, so this function's return
-		 * value will be determined by the parent cache only. This is
-		 * basically because not all attributes will have a well
-		 * defined semantics for rollbacks - most of the actions will
-		 * have permanent effects.
-		 *
-		 * Returning the error value of any of the children that fail
-		 * is not 100 % defined, in the sense that users seeing the
-		 * error code won't be able to know anything about the state of
-		 * the cache.
-		 *
-		 * Only returning the error code for the parent cache at least
-		 * has well defined semantics. The cache being written to
-		 * directly either failed or succeeded, in which case we loop
-		 * through the descendants with best-effort propagation.
-		 */
-		c = memcg_cache(s);
-		if (c)
-			attribute->store(c, buf, len);
-		mutex_unlock(&slab_mutex);
-	}
-#endif
 	return err;
 }
 
-static void memcg_propagate_slab_attrs(struct kmem_cache *s)
-{
-#ifdef CONFIG_MEMCG
-	int i;
-	char *buffer = NULL;
-	struct kmem_cache *root_cache;
-
-	if (is_root_cache(s))
-		return;
-
-	root_cache = s->memcg_params.root_cache;
-
-	/*
-	 * This mean this cache had no attribute written. Therefore, no point
-	 * in copying default values around
-	 */
-	if (!root_cache->max_attr_size)
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
-		char mbuf[64];
-		char *buf;
-		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
-		ssize_t len;
-
-		if (!attr || !attr->store || !attr->show)
-			continue;
-
-		/*
-		 * It is really bad that we have to allocate here, so we will
-		 * do it only as a fallback. If we actually allocate, though,
-		 * we can just use the allocated buffer until the end.
-		 *
-		 * Most of the slub attributes will tend to be very small in
-		 * size, but sysfs allows buffers up to a page, so they can
-		 * theoretically happen.
-		 */
-		if (buffer)
-			buf = buffer;
-		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
-			 !IS_ENABLED(CONFIG_SLUB_STATS))
-			buf = mbuf;
-		else {
-			buffer = (char *) get_zeroed_page(GFP_KERNEL);
-			if (WARN_ON(!buffer))
-				continue;
-			buf = buffer;
-		}
-
-		len = attr->show(root_cache, buf);
-		if (len > 0)
-			attr->store(s, buf, len);
-	}
-
-	if (buffer)
-		free_page((unsigned long)buffer);
-#endif	/* CONFIG_MEMCG */
-}
-
 static void kmem_cache_release(struct kobject *k)
 {
 	slab_kmem_cache_release(to_slab(k));
@@ -5648,10 +5543,6 @@ static struct kset *slab_kset;
 
 static inline struct kset *cache_kset(struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG
-	if (!is_root_cache(s))
-		return s->memcg_params.root_cache->memcg_kset;
-#endif
 	return slab_kset;
 }
 
@@ -5694,27 +5585,6 @@ static char *create_unique_id(struct kme
 	return name;
 }
 
-static void sysfs_slab_remove_workfn(struct work_struct *work)
-{
-	struct kmem_cache *s =
-		container_of(work, struct kmem_cache, kobj_remove_work);
-
-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown.  Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		goto out;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-out:
-	kobject_put(&s->kobj);
-}
-
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5722,8 +5592,6 @@ static int sysfs_slab_add(struct kmem_ca
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
-	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
-
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;
@@ -5760,16 +5628,6 @@ static int sysfs_slab_add(struct kmem_ca
 	if (err)
 		goto out_del_kobj;
 
-#ifdef CONFIG_MEMCG
-	if (is_root_cache(s) && memcg_sysfs_enabled) {
-		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
-		if (!s->memcg_kset) {
-			err = -ENOMEM;
-			goto out_del_kobj;
-		}
-	}
-#endif
-
 	if (!unmergeable) {
 		/* Setup first alias */
 		sysfs_slab_alias(s, s->name);
@@ -5783,19 +5641,6 @@ out_del_kobj:
 	goto out;
 }
 
-static void sysfs_slab_remove(struct kmem_cache *s)
-{
-	if (slab_state < FULL)
-		/*
-		 * Sysfs has not been setup yet so no need to remove the
-		 * cache from sysfs.
-		 */
-		return;
-
-	kobject_get(&s->kobj);
-	schedule_work(&s->kobj_remove_work);
-}
-
 void sysfs_slab_unlink(struct kmem_cache *s)
 {
 	if (slab_state >= FULL)
_

Thread overview: 182+ messages
2020-08-07  6:16 incoming Andrew Morton
2020-08-07  6:17 ` [patch 001/163] mm/memory.c: avoid access flag update TLB flush for retried page fault Andrew Morton
2020-08-07 18:17   ` Linus Torvalds
2020-08-07 18:17     ` Linus Torvalds
2020-08-07 20:53     ` Yang Shi
2020-08-07 20:53       ` Yang Shi
2020-08-08  4:33       ` Linus Torvalds
2020-08-08  4:33         ` Linus Torvalds
2020-08-10 17:48         ` Yang Shi
2020-08-10 17:48           ` Yang Shi
2020-08-10 18:57           ` Linus Torvalds
2020-08-10 18:57             ` Linus Torvalds
2020-08-07  6:17 ` [patch 002/163] mm/migrate: fix migrate_pgmap_owner w/o CONFIG_MMU_NOTIFIER Andrew Morton
2020-08-07  6:17 ` [patch 003/163] mm/shuffle: don't move pages between zones and don't read garbage memmaps Andrew Morton
2020-08-07  6:17 ` [patch 004/163] mm: fix kthread_use_mm() vs TLB invalidate Andrew Morton
2020-08-07  6:17 ` [patch 005/163] kthread: remove incorrect comment in kthread_create_on_cpu() Andrew Morton
2020-08-07  6:17 ` [patch 006/163] tools/: replace HTTP links with HTTPS ones Andrew Morton
2020-08-07  6:17 ` [patch 007/163] tools/testing/selftests/cgroup/cgroup_util.c: cg_read_strcmp: fix null pointer dereference Andrew Morton
2020-08-07  6:17 ` [patch 008/163] scripts/tags.sh: collect compiled source precisely Andrew Morton
2020-08-07  6:17 ` [patch 009/163] scripts/bloat-o-meter: Support comparing library archives Andrew Morton
2020-08-07  6:17 ` [patch 010/163] scripts/decode_stacktrace.sh: skip missing symbols Andrew Morton
2020-08-07  6:17 ` [patch 011/163] scripts/decode_stacktrace.sh: guess basepath if not specified Andrew Morton
2020-08-07  6:17 ` [patch 012/163] scripts/decode_stacktrace.sh: guess path to modules Andrew Morton
2020-08-07  6:17 ` [patch 013/163] scripts/decode_stacktrace.sh: guess path to vmlinux by release name Andrew Morton
2020-08-07  6:17 ` [patch 014/163] const_structs.checkpatch: add regulator_ops Andrew Morton
2020-08-07  6:17 ` [patch 015/163] scripts/spelling.txt: add more spellings to spelling.txt Andrew Morton
2020-08-07  6:17 ` [patch 016/163] ntfs: fix ntfs_test_inode and ntfs_init_locked_inode function type Andrew Morton
2020-08-07  6:17 ` [patch 017/163] ocfs2: fix remounting needed after setfacl command Andrew Morton
2020-08-07  6:17 ` [patch 018/163] ocfs2: suballoc.h: delete a duplicated word Andrew Morton
2020-08-07  6:18 ` [patch 019/163] ocfs2: change slot number type s16 to u16 Andrew Morton
2020-08-07  6:18 ` [patch 020/163] ocfs2: replace HTTP links with HTTPS ones Andrew Morton
2020-08-07  6:18 ` [patch 021/163] ocfs2: fix unbalanced locking Andrew Morton
2020-08-07  6:18 ` [patch 022/163] mm, treewide: rename kzfree() to kfree_sensitive() Andrew Morton
2020-08-07  6:18 ` [patch 023/163] mm: ksize() should silently accept a NULL pointer Andrew Morton
2020-08-07  6:18 ` [patch 024/163] mm/slab: expand CONFIG_SLAB_FREELIST_HARDENED to include SLAB Andrew Morton
2020-08-07  6:18 ` [patch 025/163] mm/slab: add naive detection of double free Andrew Morton
2020-08-07  6:18 ` [patch 026/163] mm, slab: check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order Andrew Morton
2020-08-07  6:18 ` [patch 027/163] mm/slab.c: update outdated kmem_list3 in a comment Andrew Morton
2020-08-07  6:18 ` [patch 028/163] mm, slub: extend slub_debug syntax for multiple blocks Andrew Morton
2020-08-07  6:18 ` [patch 029/163] mm, slub: make some slub_debug related attributes read-only Andrew Morton
2020-08-07  6:18 ` [patch 030/163] mm, slub: remove runtime allocation order changes Andrew Morton
2020-08-07  6:18 ` [patch 031/163] mm, slub: make remaining slub_debug related attributes read-only Andrew Morton
2020-08-07  6:18 ` [patch 032/163] mm, slub: make reclaim_account attribute read-only Andrew Morton
2020-08-07  6:18 ` [patch 033/163] mm, slub: introduce static key for slub_debug() Andrew Morton
2020-08-07  6:18 ` [patch 034/163] mm, slub: introduce kmem_cache_debug_flags() Andrew Morton
2020-08-07  6:18 ` [patch 035/163] mm, slub: extend checks guarded by slub_debug static key Andrew Morton
2020-08-07  6:19 ` [patch 036/163] mm, slab/slub: move and improve cache_from_obj() Andrew Morton
2020-08-07  6:19 ` [patch 037/163] mm, slab/slub: improve error reporting and overhead of cache_from_obj() Andrew Morton
2020-08-07  6:19 ` [patch 038/163] mm/slub.c: drop lockdep_assert_held() from put_map() Andrew Morton
2020-08-07  6:19 ` [patch 039/163] mm, kcsan: instrument SLAB/SLUB free with "ASSERT_EXCLUSIVE_ACCESS" Andrew Morton
2020-08-07  6:19 ` [patch 040/163] mm/debug_vm_pgtable: add tests validating arch helpers for core MM features Andrew Morton
2020-08-07  6:19 ` [patch 041/163] mm/debug_vm_pgtable: add tests validating advanced arch page table helpers Andrew Morton
2020-08-07  6:19 ` [patch 042/163] mm/debug_vm_pgtable: add debug prints for individual tests Andrew Morton
2020-08-07  6:19 ` [patch 043/163] Documentation/mm: add descriptions for arch page table helpers Andrew Morton
2020-08-07  6:19 ` [patch 044/163] mm/debug: handle page->mapping better in dump_page Andrew Morton
2020-08-07  6:19 ` [patch 045/163] mm/debug: dump compound page information on a second line Andrew Morton
2020-08-07  6:19 ` [patch 046/163] mm/debug: print head flags in dump_page Andrew Morton
2020-08-07  6:19 ` [patch 047/163] mm/debug: switch dump_page to get_kernel_nofault Andrew Morton
2020-08-07  6:19 ` [patch 048/163] mm/debug: print the inode number in dump_page Andrew Morton
2020-08-07  6:19 ` [patch 049/163] mm/debug: print hashed address of struct page Andrew Morton
2020-08-07  6:19 ` [patch 050/163] mm, dump_page: do not crash with bad compound_mapcount() Andrew Morton
2020-08-07  6:19 ` [patch 051/163] mm: filemap: clear idle flag for writes Andrew Morton
2020-08-07  6:19 ` [patch 052/163] mm: filemap: add missing FGP_ flags in kerneldoc comment for pagecache_get_page Andrew Morton
2020-08-07  6:20 ` [patch 053/163] mm/gup.c: fix the comment of return value for populate_vma_page_range() Andrew Morton
2020-08-07  6:20 ` [patch 054/163] mm/swap_slots.c: simplify alloc_swap_slot_cache() Andrew Morton
2020-08-07  6:20 ` [patch 055/163] mm/swap_slots.c: simplify enable_swap_slots_cache() Andrew Morton
2020-08-07  6:20 ` [patch 056/163] mm/swap_slots.c: remove redundant check for swap_slot_cache_initialized Andrew Morton
2020-08-07  6:20 ` [patch 057/163] mm: swap: fix kerneldoc of swap_vma_readahead() Andrew Morton
2020-08-07  6:20 ` [patch 058/163] mm/page_io.c: use blk_io_schedule() for avoiding task hung in sync io Andrew Morton
2020-08-07  6:20 ` [patch 059/163] tmpfs: per-superblock i_ino support Andrew Morton
2020-08-07  6:20 ` [patch 060/163] tmpfs: support 64-bit inums per-sb Andrew Morton
2020-08-07  6:20 ` [patch 061/163] mm: kmem: make memcg_kmem_enabled() irreversible Andrew Morton
2020-08-07  6:20 ` [patch 062/163] mm: memcg: factor out memcg- and lruvec-level changes out of __mod_lruvec_state() Andrew Morton
2020-08-07  6:20 ` [patch 063/163] mm: memcg: prepare for byte-sized vmstat items Andrew Morton
2020-08-07  6:20 ` [patch 064/163] mm: memcg: convert vmstat slab counters to bytes Andrew Morton
2020-08-07  6:20 ` [patch 065/163] mm: slub: implement SLUB version of obj_to_index() Andrew Morton
2020-08-07  6:20 ` [patch 066/163] mm: memcontrol: decouple reference counting from page accounting Andrew Morton
2020-08-07  6:20 ` [patch 067/163] mm: memcg/slab: obj_cgroup API Andrew Morton
2020-08-07  6:20 ` [patch 068/163] mm: memcg/slab: allocate obj_cgroups for non-root slab pages Andrew Morton
2020-08-07  6:20 ` [patch 069/163] mm: memcg/slab: save obj_cgroup for non-root slab objects Andrew Morton
2020-08-07  6:20 ` [patch 070/163] mm: memcg/slab: charge individual slab objects instead of pages Andrew Morton
2020-08-07  6:21 ` [patch 071/163] mm: memcg/slab: deprecate memory.kmem.slabinfo Andrew Morton
2020-08-07  6:21 ` [patch 072/163] mm: memcg/slab: move memcg_kmem_bypass() to memcontrol.h Andrew Morton
2020-08-07  6:21 ` [patch 073/163] mm: memcg/slab: use a single set of kmem_caches for all accounted allocations Andrew Morton
2020-08-07  6:21 ` [patch 074/163] mm: memcg/slab: simplify memcg cache creation Andrew Morton
2020-08-07  6:21 ` [patch 075/163] mm: memcg/slab: remove memcg_kmem_get_cache() Andrew Morton
2020-08-07  6:21 ` [patch 076/163] mm: memcg/slab: deprecate slab_root_caches Andrew Morton
2020-08-07  6:21 ` [patch 077/163] mm: memcg/slab: remove redundant check in memcg_accumulate_slabinfo() Andrew Morton
2020-08-07  6:21 ` Andrew Morton [this message]
2020-08-07  6:21 ` [patch 079/163] kselftests: cgroup: add kernel memory accounting tests Andrew Morton
2020-08-07  6:21 ` [patch 080/163] tools/cgroup: add memcg_slabinfo.py tool Andrew Morton
2020-08-07  6:21 ` [patch 081/163] mm: memcontrol: account kernel stack per node Andrew Morton
2020-08-07  6:21 ` [patch 082/163] mm: memcg/slab: remove unused argument by charge_slab_page() Andrew Morton
2020-08-07  6:21 ` [patch 083/163] mm: slab: rename (un)charge_slab_page() to (un)account_slab_page() Andrew Morton
2020-08-07  6:21 ` [patch 084/163] mm: kmem: switch to static_branch_likely() in memcg_kmem_enabled() Andrew Morton
2020-08-07  6:21 ` [patch 085/163] mm: memcontrol: avoid workload stalls when lowering memory.high Andrew Morton
2020-08-07  6:21 ` [patch 086/163] mm, memcg: reclaim more aggressively before high allocator throttling Andrew Morton
2020-08-07  6:21 ` [patch 087/163] mm, memcg: unify reclaim retry limits with page allocator Andrew Morton
2020-08-07  6:22 ` [patch 088/163] mm, memcg: avoid stale protection values when cgroup is above protection Andrew Morton
2020-08-07  6:22 ` [patch 089/163] mm, memcg: decouple e{low,min} state mutations from protection checks Andrew Morton
2020-08-07  6:22 ` [patch 090/163] memcg, oom: check memcg margin for parallel oom Andrew Morton
2020-08-07  6:22 ` [patch 091/163] mm: memcontrol: restore proper dirty throttling when memory.high changes Andrew Morton
2020-08-07  6:22 ` [patch 092/163] mm: memcontrol: don't count limit-setting reclaim as memory pressure Andrew Morton
2020-08-07  6:22 ` [patch 093/163] mm/page_counter.c: fix protection usage propagation Andrew Morton
2020-08-07  6:22 ` [patch 094/163] mm: remove redundant check non_swap_entry() Andrew Morton
2020-08-07  6:22 ` [patch 095/163] mm/memory.c: make remap_pfn_range() reject unaligned addr Andrew Morton
2020-08-07  6:22 ` [patch 096/163] mm: remove unneeded includes of <asm/pgalloc.h> Andrew Morton
2020-08-07  6:22 ` [patch 097/163] opeinrisc: switch to generic version of pte allocation Andrew Morton
2020-08-07  6:22 ` [patch 098/163] xtensa: " Andrew Morton
2020-08-07  6:22 ` [patch 099/163] asm-generic: pgalloc: provide generic pmd_alloc_one() and pmd_free_one() Andrew Morton
2020-08-07  6:22 ` [patch 100/163] asm-generic: pgalloc: provide generic pud_alloc_one() and pud_free_one() Andrew Morton
2020-08-07  6:22 ` [patch 101/163] asm-generic: pgalloc: provide generic pgd_free() Andrew Morton
2020-08-07  6:22 ` [patch 102/163] mm: move lib/ioremap.c to mm/ Andrew Morton
2020-08-07  6:22 ` [patch 103/163] mm: move p?d_alloc_track to separate header file Andrew Morton
2020-08-07  6:22 ` [patch 104/163] mm/mmap: optimize a branch judgment in ksys_mmap_pgoff() Andrew Morton
2020-08-07  6:23 ` [patch 105/163] proc/meminfo: avoid open coded reading of vm_committed_as Andrew Morton
2020-08-07  6:23 ` [patch 106/163] mm/util.c: make vm_memory_committed() more accurate Andrew Morton
2020-08-07  6:23 ` [patch 107/163] percpu_counter: add percpu_counter_sync() Andrew Morton
2020-08-07  6:23 ` [patch 108/163] mm: adjust vm_committed_as_batch according to vm overcommit policy Andrew Morton
2020-08-07  6:23 ` [patch 109/163] mm/sparsemem: enable vmem_altmap support in vmemmap_populate_basepages() Andrew Morton
2020-08-07  6:23 ` [patch 110/163] mm/sparsemem: enable vmem_altmap support in vmemmap_alloc_block_buf() Andrew Morton
2020-08-07  6:23 ` [patch 111/163] arm64/mm: enable vmem_altmap support for vmemmap mappings Andrew Morton
2020-08-07  6:23 ` [patch 112/163] mm: mmap: merge vma after call_mmap() if possible Andrew Morton
2020-08-07  6:23 ` [patch 113/163] mm: remove unnecessary wrapper function do_mmap_pgoff() Andrew Morton
2020-08-07  6:23 ` [patch 114/163] mm/mremap: it is sure to have enough space when extent meets requirement Andrew Morton
2020-08-07  6:23 ` [patch 115/163] mm/mremap: calculate extent in one place Andrew Morton
2020-08-07  6:23 ` [patch 116/163] mm/mremap: start addresses are properly aligned Andrew Morton
2020-08-07  6:23 ` [patch 117/163] selftests: add mincore() tests Andrew Morton
2020-08-07  6:23 ` [patch 118/163] mm/sparse: never partially remove memmap for early section Andrew Morton
2020-08-07  6:23 ` [patch 119/163] mm/sparse: only sub-section aligned range would be populated Andrew Morton
2020-08-07  6:24 ` [patch 120/163] mm/sparse: cleanup the code surrounding memory_present() Andrew Morton
2020-08-07  6:24 ` [patch 121/163] vmalloc: convert to XArray Andrew Morton
2020-08-07  6:24 ` [patch 122/163] mm/vmalloc: simplify merge_or_add_vmap_area() Andrew Morton
2020-08-07  6:24 ` [patch 123/163] mm/vmalloc: simplify augment_tree_propagate_check() Andrew Morton
2020-08-07  6:24 ` [patch 124/163] mm/vmalloc: switch to "propagate()" callback Andrew Morton
2020-08-07  6:24 ` [patch 125/163] mm/vmalloc: update the header about KVA rework Andrew Morton
2020-08-07  6:24 ` [patch 126/163] mm: vmalloc: remove redundant assignment in unmap_kernel_range_noflush() Andrew Morton
2020-08-07  6:24 ` [patch 127/163] mm/vmalloc.c: remove BUG() from the find_va_links() Andrew Morton
2020-08-07  6:24 ` [patch 128/163] kasan: improve and simplify Kconfig.kasan Andrew Morton
2020-08-07  6:24 ` [patch 129/163] kasan: update required compiler versions in documentation Andrew Morton
2020-08-07  6:24 ` [patch 130/163] rcu: kasan: record and print call_rcu() call stack Andrew Morton
2020-08-07  6:24 ` [patch 131/163] kasan: record and print the free track Andrew Morton
2020-08-07  6:24 ` [patch 132/163] kasan: add tests for call_rcu stack recording Andrew Morton
2020-08-07  6:24 ` [patch 133/163] kasan: update documentation for generic kasan Andrew Morton
2020-08-07  6:24 ` [patch 134/163] kasan: remove kasan_unpoison_stack_above_sp_to() Andrew Morton
2020-08-07  6:24 ` [patch 135/163] lib/test_kasan.c: fix KASAN unit tests for tag-based KASAN Andrew Morton
2020-08-07  6:24 ` [patch 136/163] kasan: don't tag stacks allocated with pagealloc Andrew Morton
2020-08-07  6:25 ` [patch 137/163] efi: provide empty efi_enter_virtual_mode implementation Andrew Morton
2020-08-07  6:25 ` [patch 138/163] kasan, arm64: don't instrument functions that enable kasan Andrew Morton
2020-08-07  6:25 ` [patch 139/163] kasan: allow enabling stack tagging for tag-based mode Andrew Morton
2020-08-07  6:25 ` [patch 140/163] kasan: adjust kasan_stack_oob " Andrew Morton
2020-08-07  6:25 ` [patch 141/163] mm, page_alloc: use unlikely() in task_capc() Andrew Morton
2020-08-07  6:25 ` [patch 142/163] page_alloc: consider highatomic reserve in watermark fast Andrew Morton
2020-08-07  6:25 ` [patch 143/163] mm, page_alloc: skip ->waternark_boost for atomic order-0 allocations Andrew Morton
2020-08-07  6:25 ` [patch 144/163] mm: remove vm_total_pages Andrew Morton
2020-08-07  6:25 ` [patch 145/163] mm/page_alloc: remove nr_free_pagecache_pages() Andrew Morton
2020-08-07  6:25 ` [patch 146/163] mm/memory_hotplug: document why shuffle_zone() is relevant Andrew Morton
2020-08-07  6:25 ` [patch 147/163] mm/shuffle: remove dynamic reconfiguration Andrew Morton
2020-08-07  6:25 ` [patch 148/163] mm/page_alloc.c: replace the definition of NR_MIGRATETYPE_BITS with PB_migratetype_bits Andrew Morton
2020-08-07  6:25 ` [patch 149/163] mm/page_alloc.c: extract the common part in pfn_to_bitidx() Andrew Morton
2020-08-07  6:25 ` [patch 150/163] mm/page_alloc.c: simplify pageblock bitmap access Andrew Morton
2020-08-07  6:25 ` [patch 151/163] mm/page_alloc.c: remove unnecessary end_bitidx for [set|get]_pfnblock_flags_mask() Andrew Morton
2020-08-07  6:25 ` [patch 152/163] mm/page_alloc: silence a KASAN false positive Andrew Morton
2020-08-07  6:25 ` [patch 153/163] mm/page_alloc: fallbacks at most has 3 elements Andrew Morton
2020-08-07  6:26 ` [patch 154/163] mm/page_alloc.c: skip setting nodemask when we are in interrupt Andrew Morton
2020-08-07  6:26 ` [patch 155/163] mm/page_alloc: fix memalloc_nocma_{save/restore} APIs Andrew Morton
2020-08-07  6:26 ` [patch 156/163] mm: thp: replace HTTP links with HTTPS ones Andrew Morton
2020-08-07  6:26 ` [patch 157/163] mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible Andrew Morton
2020-08-07  6:26 ` [patch 158/163] khugepaged: collapse_pte_mapped_thp() flush the right range Andrew Morton
2020-08-07  6:26 ` [patch 159/163] khugepaged: collapse_pte_mapped_thp() protect the pmd lock Andrew Morton
2020-08-07  6:26 ` [patch 160/163] khugepaged: retract_page_tables() remember to test exit Andrew Morton
2020-08-07  6:26 ` [patch 161/163] khugepaged: khugepaged_test_exit() check mmget_still_valid() Andrew Morton
2020-08-07  6:26 ` [patch 162/163] mm/vmscan.c: fix typo Andrew Morton
2020-08-07  6:26 ` [patch 163/163] mm: vmscan: consistent update to pgrefill Andrew Morton
2020-08-07 19:32 ` [nacked] mm-avoid-access-flag-update-tlb-flush-for-retried-page-fault.patch removed from -mm tree Andrew Morton
2020-08-11  4:18 ` + mempolicyh-fix-typo.patch added to " Andrew Morton
2020-08-11  4:47 ` + mm-vunmap-add-cond_resched-in-vunmap_pmd_range.patch " Andrew Morton
2020-08-11 20:33 ` + mm-memcg-charge-memcg-percpu-memory-to-the-parent-cgroup-fix.patch " Andrew Morton
2020-08-11 20:49 ` + mm-slub-fix-conversion-of-freelist_corrupted.patch " Andrew Morton
2020-08-11 21:14 ` + revert-mm-vmstatc-do-not-show-lowmem-reserve-protection-information-of-empty-zone.patch " Andrew Morton
2020-08-11 21:16 ` + romfs-support-inode-blocks-calculation.patch " Andrew Morton
2020-08-11 22:20 ` + mm-vmstat-add-events-for-thp-migration-without-split-v4.patch " Andrew Morton
