From: Vlastimil Babka <vbabka@suse.cz>
To: Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Pekka Enberg <penberg@kernel.org>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Andrew Morton <akpm@linux-foundation.org>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>,
	patches@lists.linux.dev, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 09/12] mm, slub: split out allocations from pre/post hooks
Date: Mon, 21 Nov 2022 18:11:59 +0100
Message-ID: <20221121171202.22080-10-vbabka@suse.cz>
In-Reply-To: <20221121171202.22080-1-vbabka@suse.cz>

In the following patch we want to introduce CONFIG_SLUB_TINY allocation
paths that don't use the percpu slab. To prepare, refactor the
allocation functions:

Split out __slab_alloc_node() from slab_alloc_node(), so that the former
does the actual allocation and the latter calls the pre/post hooks around it.

Analogously, split out __kmem_cache_alloc_bulk() from
kmem_cache_alloc_bulk().
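
For illustration only, a minimal standalone userspace sketch of the pattern
(hypothetical names, not kernel code): the wrapper keeps the pre/post hooks
while the raw allocation step lives in a separate helper that an alternative
allocation path could replace. The real signatures are in the diff below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the raw allocation step (__slab_alloc_node() in the patch) */
static void *raw_alloc(size_t size)
{
	return malloc(size);
}

/* stand-in for slab_pre_alloc_hook(): sanity checks / accounting */
static int pre_alloc_hook(size_t size)
{
	return size != 0;
}

/* stand-in for slab_post_alloc_hook(): e.g. init-on-alloc */
static void post_alloc_hook(void *obj, size_t size)
{
	if (obj)
		memset(obj, 0, size);
}

/* wrapper keeping the hook structure, like slab_alloc_node() after the patch */
static void *alloc_object(size_t size)
{
	void *obj;

	if (!pre_alloc_hook(size))
		return NULL;

	obj = raw_alloc(size);

	post_alloc_hook(obj, size);
	return obj;
}

int main(void)
{
	void *p = alloc_object(32);

	printf("allocated object at %p\n", p);
	free(p);
	return 0;
}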

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 127 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 77 insertions(+), 50 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index fd56d7cca9c2..5677db3f6d15 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2907,10 +2907,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 }
 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
 
+#ifdef CONFIG_SLUB_DEBUG
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
-#ifdef CONFIG_SLUB_DEBUG
 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 	int node;
@@ -2941,8 +2941,11 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
-#endif
 }
+#else /* CONFIG_SLUB_DEBUG */
+static inline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
+#endif
 
 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 {
@@ -3239,45 +3242,13 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
-/*
- * If the object has been wiped upon free, make sure it's fully initialized by
- * zeroing out freelist pointer.
- */
-static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
-						   void *obj)
-{
-	if (unlikely(slab_want_init_on_free(s)) && obj)
-		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
-			0, sizeof(void *));
-}
-
-/*
- * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
- * have the fastpath folded into their functions. So no function call
- * overhead for requests that can be satisfied on the fastpath.
- *
- * The fastpath works by first checking if the lockless freelist can be used.
- * If not then __slab_alloc is called for slow processing.
- *
- * Otherwise we can simply pick the next object from the lockless free list.
- */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
-	void *object;
 	struct kmem_cache_cpu *c;
 	struct slab *slab;
 	unsigned long tid;
-	struct obj_cgroup *objcg = NULL;
-	bool init = false;
-
-	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
-	if (!s)
-		return NULL;
-
-	object = kfence_alloc(s, orig_size, gfpflags);
-	if (unlikely(object))
-		goto out;
+	void *object;
 
 redo:
 	/*
@@ -3347,6 +3318,48 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 		stat(s, ALLOC_FASTPATH);
 	}
 
+	return object;
+}
+
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+			0, sizeof(void *));
+}
+
+/*
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
+ * have the fastpath folded into their functions. So no function call
+ * overhead for requests that can be satisfied on the fastpath.
+ *
+ * The fastpath works by first checking if the lockless freelist can be used.
+ * If not then __slab_alloc is called for slow processing.
+ *
+ * Otherwise we can simply pick the next object from the lockless free list.
+ */
+static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
+{
+	void *object;
+	struct obj_cgroup *objcg = NULL;
+	bool init = false;
+
+	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
+	if (!s)
+		return NULL;
+
+	object = kfence_alloc(s, orig_size, gfpflags);
+	if (unlikely(object))
+		goto out;
+
+	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
+
 	maybe_wipe_obj_freeptr(s, object);
 	init = slab_want_init_on_alloc(gfpflags, s);
 
@@ -3799,18 +3812,12 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-/* Note that interrupts must be enabled when calling this function. */
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			  void **p)
+static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
+			size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
 	int i;
-	struct obj_cgroup *objcg = NULL;
 
-	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
-	if (unlikely(!s))
-		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -3864,18 +3871,38 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	local_unlock_irq(&s->cpu_slab->lock);
 	slub_put_cpu_ptr(s->cpu_slab);
 
-	/*
-	 * memcg and kmem_cache debug support and memory initialization.
-	 * Done outside of the IRQ disabled fastpath loop.
-	 */
-	slab_post_alloc_hook(s, objcg, flags, size, p,
-				slab_want_init_on_alloc(flags, s));
 	return i;
+
 error:
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
+
+}
+
+/* Note that interrupts must be enabled when calling this function. */
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			  void **p)
+{
+	int i;
+	struct obj_cgroup *objcg = NULL;
+
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
+	if (unlikely(!s))
+		return false;
+
+	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+
+	/*
+	 * memcg and kmem_cache debug support and memory initialization.
+	 * Done outside of the IRQ disabled fastpath loop.
+	 */
+	if (i != 0)
+		slab_post_alloc_hook(s, objcg, flags, size, p,
+				slab_want_init_on_alloc(flags, s));
+	return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
-- 
2.38.1

