All of lore.kernel.org
 help / color / mirror / Atom feed
From: chengming.zhou@linux.dev
To: cl@linux.com, penberg@kernel.org
Cc: rientjes@google.com, iamjoonsoo.kim@lge.com,
	akpm@linux-foundation.org, vbabka@suse.cz,
	roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	chengming.zhou@linux.dev,
	Chengming Zhou <zhouchengming@bytedance.com>
Subject: [RFC PATCH 1/5] slub: Introduce on_partial()
Date: Tue, 17 Oct 2023 15:44:35 +0000	[thread overview]
Message-ID: <20231017154439.3036608-2-chengming.zhou@linux.dev> (raw)
In-Reply-To: <20231017154439.3036608-1-chengming.zhou@linux.dev>

From: Chengming Zhou <zhouchengming@bytedance.com>

We change slab->__unused to slab->flags to use it as SLUB_FLAGS, which
for now only includes the SF_NODE_PARTIAL flag. It indicates whether or
not the slab is on the node partial list.

The following patches will change SLUB to not freeze the slab when
moving it from the node partial list to the cpu partial list, so we
can't rely on the frozen bit to see if we should manipulate the
slab->slab_list.

Instead we will rely on this SF_NODE_PARTIAL flag, which is protected
by the node's list_lock.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/slab.h |  2 +-
 mm/slub.c | 28 ++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/mm/slab.h b/mm/slab.h
index 8cd3294fedf5..11e9c9a0f648 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -89,7 +89,7 @@ struct slab {
 		};
 		struct rcu_head rcu_head;
 	};
-	unsigned int __unused;
+	unsigned int flags;
 
 #else
 #error "Unexpected slab allocator configured"
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..e5356ad14951 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1993,6 +1993,12 @@ static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
 }
 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 
+enum SLUB_FLAGS {
+	SF_INIT_VALUE = 0,
+	SF_EXIT_VALUE = -1,
+	SF_NODE_PARTIAL = 1 << 0,
+};
+
 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct slab *slab;
@@ -2031,6 +2037,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	slab->objects = oo_objects(oo);
 	slab->inuse = 0;
 	slab->frozen = 0;
+	slab->flags = SF_INIT_VALUE;
 
 	account_slab(slab, oo_order(oo), s, flags);
 
@@ -2077,6 +2084,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
+	slab->flags = SF_EXIT_VALUE;
 	__slab_clear_pfmemalloc(slab);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
@@ -2119,9 +2127,28 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
 /*
  * Management of partially allocated slabs.
  */
+static void ___add_partial(struct kmem_cache_node *n, struct slab *slab)
+{
+	lockdep_assert_held(&n->list_lock);
+	slab->flags |= SF_NODE_PARTIAL;
+}
+
+static void ___remove_partial(struct kmem_cache_node *n, struct slab *slab)
+{
+	lockdep_assert_held(&n->list_lock);
+	slab->flags &= ~SF_NODE_PARTIAL;
+}
+
+static inline bool on_partial(struct kmem_cache_node *n, struct slab *slab)
+{
+	lockdep_assert_held(&n->list_lock);
+	return slab->flags & SF_NODE_PARTIAL;
+}
+
 static inline void
 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
 {
+	___add_partial(n, slab);
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&slab->slab_list, &n->partial);
@@ -2142,6 +2169,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 	lockdep_assert_held(&n->list_lock);
 	list_del(&slab->slab_list);
 	n->nr_partial--;
+	___remove_partial(n, slab);
 }
 
 /*
-- 
2.40.1


  reply	other threads:[~2023-10-17 15:45 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-10-17 15:44 [RFC PATCH 0/5] slub: Delay freezing of CPU partial slabs chengming.zhou
2023-10-17 15:44 ` chengming.zhou [this message]
2023-10-17 15:54   ` [RFC PATCH 1/5] slub: Introduce on_partial() Matthew Wilcox
2023-10-18  7:37     ` Chengming Zhou
2023-10-27  5:26   ` kernel test robot
2023-10-27  9:43     ` Chengming Zhou
2023-10-17 15:44 ` [RFC PATCH 2/5] slub: Don't manipulate slab list when used by cpu chengming.zhou
2023-10-17 15:44 ` [RFC PATCH 3/5] slub: Optimize deactivate_slab() chengming.zhou
2023-10-17 15:44 ` [RFC PATCH 4/5] slub: Don't freeze slabs for cpu partial chengming.zhou
2023-10-17 15:44 ` [RFC PATCH 5/5] slub: Introduce get_cpu_partial() chengming.zhou
2023-10-18  6:34 ` [RFC PATCH 0/5] slub: Delay freezing of CPU partial slabs Hyeonggon Yoo
2023-10-18  7:44   ` Chengming Zhou

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231017154439.3036608-2-chengming.zhou@linux.dev \
    --to=chengming.zhou@linux.dev \
    --cc=42.hyeyoo@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=cl@linux.com \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=penberg@kernel.org \
    --cc=rientjes@google.com \
    --cc=roman.gushchin@linux.dev \
    --cc=vbabka@suse.cz \
    --cc=zhouchengming@bytedance.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.