From: Vlastimil Babka <vbabka@suse.cz>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Pekka Enberg <penberg@kernel.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Thomas Gleixner <tglx@linutronix.de>,
Mel Gorman <mgorman@techsingularity.net>,
Jesper Dangaard Brouer <brouer@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Jann Horn <jannh@google.com>, Vlastimil Babka <vbabka@suse.cz>
Subject: [RFC v2 31/34] mm, slub: optionally save/restore irqs in slab_[un]lock()
Date: Wed, 9 Jun 2021 13:39:00 +0200
Message-ID: <20210609113903.1421-32-vbabka@suse.cz>
In-Reply-To: <20210609113903.1421-1-vbabka@suse.cz>

For PREEMPT_RT we will need to disable irqs for this bit spinlock. As a
preparation, add a flags parameter to slab_lock()/slab_unlock(), and an
internal version that takes an additional bool parameter to control irq
saving/restoring (the flags parameter is compile-time unused if the bool
is a constant false).

Convert ___cmpxchg_double_slab(), which already takes the same bool
parameter, to use the internal version.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
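A note on the "compile-time unused" claim above, illustrated by a sketch
that is not part of the patch: because the internal helpers are
__always_inline and the plain wrappers pass a constant false, constant
propagation removes the irq-flags handling entirely, so the extra flags
argument costs nothing on !PREEMPT_RT. A minimal userspace sketch of the
same pattern (the demo_* and fake_* names are invented for this example):

/* demo.c - build with: gcc -O2 -S demo.c and inspect the assembly */
#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_irq_state;	/* stand-in for the CPU irq flag word */

static inline void fake_irq_save(unsigned long *flags)
{
	*flags = fake_irq_state;	/* pretend to save and disable irqs */
}

/* internal version: the bool decides whether *flags is touched at all */
static inline __attribute__((always_inline)) void
demo_lock(unsigned long *flags, bool disable_irqs)
{
	if (disable_irqs)
		fake_irq_save(flags);
	/* ... the equivalent of bit_spin_lock() would go here ... */
}

/*
 * Wrapper passing a constant false: after inlining, the branch is dead
 * code, so 'flags' is never read or written; only its address is formed.
 */
static inline void demo_lock_plain(unsigned long *flags)
{
	demo_lock(flags, false);
}

int main(void)
{
	unsigned long flags;

	demo_lock_plain(&flags);	/* compiles down to no flags handling */
	puts("locked; flags untouched");
	return 0;
}

Compiling the sketch with -O2 and inspecting the assembly should show that
demo_lock_plain() performs no load or store through the flags pointer.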
mm/slub.c | 49 +++++++++++++++++++++++++++++++------------------
1 file changed, 31 insertions(+), 18 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index cfd5a7660375..6721169f816d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -353,18 +353,35 @@ static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void slab_lock(struct page *page)
+static __always_inline void
+__slab_lock(struct page *page, unsigned long *flags, bool disable_irqs)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
+	if (disable_irqs)
+		local_irq_save(*flags);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void
+__slab_unlock(struct page *page, unsigned long *flags, bool disable_irqs)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
+	if (disable_irqs)
+		local_irq_restore(*flags);
 }
 
+static __always_inline void
+slab_lock(struct page *page, unsigned long *flags)
+{
+	__slab_lock(page, flags, false);
+}
+
+static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+{
+	__slab_unlock(page, flags, false);
+}
+
 static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
 		void *freelist_new, unsigned long counters_new,
@@ -384,21 +401,15 @@ static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	{
 		unsigned long flags;
 
-		if (disable_irqs)
-			local_irq_save(flags);
-		slab_lock(page);
+		__slab_lock(page, &flags, disable_irqs);
 		if (page->freelist == freelist_old &&
 		    page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			slab_unlock(page);
-			if (disable_irqs)
-				local_irq_restore(flags);
+			__slab_unlock(page, &flags, disable_irqs);
 			return true;
 		}
-		slab_unlock(page);
-		if (disable_irqs)
-			local_irq_restore(flags);
+		__slab_unlock(page, &flags, disable_irqs);
 	}
 
 	cpu_relax();
@@ -1214,11 +1225,11 @@ static noinline int free_debug_processing(
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	void *object = head;
 	int cnt = 0;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	slab_lock(page);
+	slab_lock(page, &flags2);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
 		if (!check_slab(s, page))
@@ -1251,7 +1262,7 @@ static noinline int free_debug_processing(
 		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
 			 bulk_cnt, cnt);
 
-	slab_unlock(page);
+	slab_unlock(page, &flags2);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
@@ -4007,9 +4018,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 	void *addr = page_address(page);
 	unsigned long *map;
 	void *p;
+	unsigned long flags;
 
 	slab_err(s, page, text, s->name);
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
@@ -4020,7 +4032,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	put_map(map);
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 #endif
 }
 
@@ -4736,8 +4748,9 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 {
 	void *p;
 	void *addr = page_address(page);
+	unsigned long flags;
 
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
 		goto unlock;
@@ -4752,7 +4765,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 			break;
 	}
 unlock:
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
--
2.31.1