From: Vlastimil Babka <vbabka@suse.cz>
To: Matthew Wilcox <willy@infradead.org>,
	Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Roman Gushchin <guro@fb.com>, Hyeonggon Yoo <42.hyeyoo@gmail.com>,
	patches@lists.linux.dev, Vlastimil Babka <vbabka@suse.cz>,
	Marco Elver <elver@google.com>,
	Alexander Potapenko <glider@google.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	kasan-dev@googlegroups.com
Subject: [PATCH v4 27/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations
Date: Tue,  4 Jan 2022 01:10:41 +0100
Message-ID: <20220104001046.12263-28-vbabka@suse.cz>
In-Reply-To: <20220104001046.12263-1-vbabka@suse.cz>

With a struct slab definition separate from struct page, we can go
further and define only the fields that the chosen sl*b implementation
actually uses. This means that everything between the __page_flags and
__page_refcount placeholders now depends on the chosen CONFIG_SL*B.
Some fields exist in all implementations (slab_list), but in some of
them they can be part of a union, so it's simpler to repeat them than
to complicate the definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although
that could be done later - for example, it's now clear that tighter
packing in SLOB would be possible.
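
The unchanged offsets are enforced at build time by the SLAB_MATCH()
assertions updated below. As a minimal standalone sketch of that
offsetof-based pattern (page_like and slab_like are illustrative
stand-ins, not the real structs):

	#include <stddef.h>
	#include <assert.h>

	struct page_like { unsigned long flags; void *lru; };
	struct slab_like { unsigned long __page_flags; void *slab_list; };

	/* assert that a slab_like field overlays its page_like twin */
	#define SLAB_MATCH(pg, sl)					\
		static_assert(offsetof(struct page_like, pg) ==		\
			      offsetof(struct slab_like, sl),		\
			      "offset mismatch: " #pg " vs " #sl)

	SLAB_MATCH(flags, __page_flags);	/* both at offset 0 */
	SLAB_MATCH(lru, slab_list);		/* both right after flags */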

This should also prevent accidental use of fields that don't exist in
the given implementation. Before this patch, virt_to_cache() and
cache_from_obj() were visible for SLOB (albeit not used), although they
rely on the slab_cache field that SLOB doesn't set. With this patch,
using them under SLOB is a compile error, so these functions are now
hidden behind an #ifndef CONFIG_SLOB.
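
As a hypothetical illustration of such a compile error (a simplified
sketch, not the code from this patch):

	/*
	 * Under CONFIG_SLOB, struct slab no longer has a slab_cache
	 * member, so dereferencing it fails to build:
	 */
	static inline struct kmem_cache *virt_to_cache(const void *obj)
	{
		struct slab *slab = virt_to_slab(obj);

		return slab->slab_cache;
		/* error: 'struct slab' has no member named 'slab_cache' */
	}

The same conditional availability is why the kfence hunk below switches
from IS_ENABLED() checks to #ifdefs: references to slab->objects or
slab->s_mem now compile only under the matching CONFIG_SL*B.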

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Marco Elver <elver@google.com> # kfence
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
---
 mm/kfence/core.c |  9 +++++----
 mm/slab.h        | 48 ++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4eb60cf5ff8b..267dfde43b91 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
diff --git a/mm/slab.h b/mm/slab.h
index 36e0022d8267..b8da249f44f9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -8,9 +8,24 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct {	/* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
@@ -18,25 +33,32 @@ struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -602,6 +628,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -648,6 +675,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 		print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
-- 
2.34.1


