From: Vlastimil Babka <vbabka@suse.cz>
To: Matthew Wilcox <willy@infradead.org>,
Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Roman Gushchin <guro@fb.com>, Hyeonggon Yoo <42.hyeyoo@gmail.com>,
patches@lists.linux.dev, Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v4 24/32] mm/slob: Convert SLOB to use struct slab and struct folio
Date: Tue, 4 Jan 2022 01:10:38 +0100
Message-ID: <20220104001046.12263-25-vbabka@suse.cz>
In-Reply-To: <20220104001046.12263-1-vbabka@suse.cz>
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Use struct slab throughout the slob allocator. Where a non-slab page can
appear, use struct folio instead of struct page.
[ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h
just for the single callers being wrappers in mm/slob.c ]
[ Hyeonggon Yoo <42.hyeyoo@gmail.com>: fix NULL pointer dereference ]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
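A note for readers following the conversion: the slab_page() / slab_folio() /
folio_slab() / virt_to_slab() helpers used below live in mm/slab.h and were
introduced earlier in this series. Roughly, they boil down to the following
(an illustrative, simplified sketch only; the real definitions use _Generic
cast macros and are not part of this patch):

/*
 * Illustrative sketch: struct slab overlays struct page / struct folio,
 * so the conversions are pointer reinterpretations plus the usual
 * virt-to-folio lookup. Simplified from mm/slab.h.
 */
static inline struct slab *folio_slab(struct folio *folio)
{
	return (struct slab *)folio;
}

static inline struct folio *slab_folio(struct slab *slab)
{
	return (struct folio *)slab;
}

static inline struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab_folio(slab);
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;
	return folio_slab(folio);
}
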
mm/slob.c | 51 +++++++++++++++++++++++++++------------------------
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index d2d15e7f191c..3c6cadbbc238 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -30,7 +30,7 @@
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* alloc_pages() directly, allocating compound pages so the page order
* does not have to be separately tracked.
- * These objects are detected in kfree() because PageSlab()
+ * These objects are detected in kfree() because folio_test_slab()
* is false for them.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
@@ -105,21 +105,21 @@ static LIST_HEAD(free_slob_large);
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *slab)
{
- return PageSlobFree(sp);
+ return PageSlobFree(slab_page(slab));
}
-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *slab, struct list_head *list)
{
- list_add(&sp->slab_list, list);
- __SetPageSlobFree(sp);
+ list_add(&slab->slab_list, list);
+ __SetPageSlobFree(slab_page(slab));
}
-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *slab)
{
- list_del(&sp->slab_list);
- __ClearPageSlobFree(sp);
+ list_del(&slab->slab_list);
+ __ClearPageSlobFree(slab_page(slab));
}
#define SLOB_UNIT sizeof(slob_t)
@@ -234,7 +234,7 @@ static void slob_free_pages(void *b, int order)
* freelist, in this case @page_removed_from_list will be set to
* true (set to false otherwise).
*/
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
int align_offset, bool *page_removed_from_list)
{
slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
int align_offset)
{
- struct page *sp;
+ struct folio *folio;
+ struct slab *sp;
struct list_head *slob_list;
slob_t *b = NULL;
unsigned long flags;
@@ -323,7 +324,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
* If there's a node specification, search for a partial
* page with a matching node id in the freelist.
*/
- if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+ if (node != NUMA_NO_NODE && slab_nid(sp) != node)
continue;
#endif
/* Enough room on this page? */
@@ -358,8 +359,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return NULL;
- sp = virt_to_page(b);
- __SetPageSlab(sp);
+ folio = virt_to_folio(b);
+ __folio_set_slab(folio);
+ sp = folio_slab(folio);
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -381,7 +383,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
*/
static void slob_free(void *block, int size)
{
- struct page *sp;
+ struct slab *sp;
slob_t *prev, *next, *b = (slob_t *)block;
slobidx_t units;
unsigned long flags;
@@ -391,7 +393,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = virt_to_page(block);
+ sp = virt_to_slab(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +403,8 @@ static void slob_free(void *block, int size)
if (slob_page_free(sp))
clear_slob_page_free(sp);
spin_unlock_irqrestore(&slob_lock, flags);
- __ClearPageSlab(sp);
- page_mapcount_reset(sp);
+ __folio_clear_slab(slab_folio(sp));
+ page_mapcount_reset(slab_page(sp));
slob_free_pages(b, 0);
return;
}
@@ -544,7 +546,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
void kfree(const void *block)
{
- struct page *sp;
+ struct folio *sp;
trace_kfree(_RET_IP_, block);
@@ -552,16 +554,17 @@ void kfree(const void *block)
return;
kmemleak_free(block);
- sp = virt_to_page(block);
- if (PageSlab(sp)) {
+ sp = virt_to_folio(block);
+ if (folio_test_slab(sp)) {
int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else {
- unsigned int order = compound_order(sp);
- mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+ unsigned int order = folio_order(sp);
+
+ mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
- __free_pages(sp, order);
+ __free_pages(folio_page(sp, 0), order);
}
}
--
2.34.1
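
For context on the comment change at the top of the diff (folio_test_slab()
replacing PageSlab() as the test for big kmalloc objects): on the allocation
side, mm/slob.c hands anything close to PAGE_SIZE straight to the page
allocator and never marks those pages PG_slab. The sketch below is a
simplified paraphrase of the existing __do_kmalloc_node(); the wrapper name
is hypothetical, and tracing, kmemleak and error-handling details are
omitted. It is not part of this patch.

/* Simplified paraphrase of __do_kmalloc_node() in mm/slob.c. */
static void *big_or_small_kmalloc(size_t size, gfp_t gfp, int node)
{
	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	if (size < PAGE_SIZE - minalign) {
		/*
		 * Small object: carve it out of a slab page and prefix the
		 * size, which kfree() reads back as *m + align in the diff
		 * above.
		 */
		unsigned int *m = slob_alloc(size + minalign, gfp,
					     minalign, node, minalign);
		if (!m)
			return NULL;
		*m = size;
		return (void *)m + minalign;
	} else {
		/*
		 * Big object: go straight to the page allocator. PG_slab is
		 * never set on these pages, which is why kfree() can detect
		 * them by folio_test_slab() being false.
		 */
		unsigned int order = get_order(size);

		if (order)
			gfp |= __GFP_COMP;
		return slob_new_pages(gfp, order, node);
	}
}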