From: Yu Zhao <yuzhao@google.com>
To: Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Andrew Morton <akpm@linux-foundation.org>,
"Kirill A . Shutemov" <kirill@shutemov.name>,
Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Yu Zhao <yuzhao@google.com>
Subject: [PATCH v2 3/4] mm: avoid slub allocation while holding list_lock
Date: Wed, 11 Sep 2019 20:31:10 -0600
Message-ID: <20190912023111.219636-3-yuzhao@google.com>
In-Reply-To: <20190912023111.219636-1-yuzhao@google.com>
If we are already under list_lock, don't call kmalloc(). Otherwise we
will run into a deadlock because kmalloc() also tries to grab the same
lock.

Fix the problem by using a static bitmap protected by its own spinlock
instead.
WARNING: possible recursive locking detected
--------------------------------------------
mount-encrypted/4921 is trying to acquire lock:
(&(&n->list_lock)->rlock){-.-.}, at: ___slab_alloc+0x104/0x437
but task is already holding lock:
(&(&n->list_lock)->rlock){-.-.}, at: __kmem_cache_shutdown+0x81/0x3cb
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&n->list_lock)->rlock);
lock(&(&n->list_lock)->rlock);
*** DEADLOCK ***
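The core of the fix, condensed from the hunks below (the names and logic
are taken directly from the patch; surrounding SLUB context is omitted
for brevity): a single preallocated bitmap guarded by its own spinlock
replaces the bitmap_zalloc()/bitmap_alloc() calls, so the debug paths
never enter the allocator while n->list_lock is held.

	/* One static map for all debug users, serialized by its own lock. */
	static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
	static DEFINE_SPINLOCK(object_map_lock);

	static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	{
		void *p;
		void *addr = page_address(page);

		VM_BUG_ON(!irqs_disabled());

		spin_lock(&object_map_lock);
		bitmap_zero(object_map, page->objects);

		/* Bits set here mark objects currently on the freelist. */
		for (p = page->freelist; p; p = get_freepointer(s, p))
			set_bit(slab_index(p, s, addr), object_map);

		return object_map;
	}

	static void put_map(unsigned long *map)
	{
		VM_BUG_ON(map != object_map);
		lockdep_assert_held(&object_map_lock);

		spin_unlock(&object_map_lock);
	}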
Signed-off-by: Yu Zhao <yuzhao@google.com>
---
mm/slub.c | 88 +++++++++++++++++++++++++++++--------------------------
1 file changed, 47 insertions(+), 41 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7b7e1ee264ef..baa60dd73942 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -443,19 +443,38 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
}
#ifdef CONFIG_SLUB_DEBUG
+static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
+static DEFINE_SPINLOCK(object_map_lock);
+
/*
* Determine a map of object in use on a page.
*
* Node listlock must be held to guarantee that the page does
* not vanish from under us.
*/
-static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+static unsigned long *get_map(struct kmem_cache *s, struct page *page)
{
void *p;
void *addr = page_address(page);
+ VM_BUG_ON(!irqs_disabled());
+
+ spin_lock(&object_map_lock);
+
+ bitmap_zero(object_map, page->objects);
+
for (p = page->freelist; p; p = get_freepointer(s, p))
- set_bit(slab_index(p, s, addr), map);
+ set_bit(slab_index(p, s, addr), object_map);
+
+ return object_map;
+}
+
+static void put_map(unsigned long *map)
+{
+ VM_BUG_ON(map != object_map);
+ lockdep_assert_held(&object_map_lock);
+
+ spin_unlock(&object_map_lock);
}
static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -3685,13 +3704,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
- if (!map)
- return;
+ unsigned long *map;
+
slab_err(s, page, text, s->name);
slab_lock(page);
- get_map(s, page, map);
+ map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3699,8 +3717,9 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
print_tracking(s, p);
}
}
+ put_map(map);
+
slab_unlock(page);
- bitmap_free(map);
#endif
}
@@ -4386,19 +4405,19 @@ static int count_total(struct page *page)
#endif
#ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page,
- unsigned long *map)
+static void validate_slab(struct kmem_cache *s, struct page *page)
{
void *p;
void *addr = page_address(page);
+ unsigned long *map;
+
+ slab_lock(page);
if (!check_slab(s, page) || !on_freelist(s, page, NULL))
- return;
+ goto unlock;
/* Now we know that a valid freelist exists */
- bitmap_zero(map, page->objects);
-
- get_map(s, page, map);
+ map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
u8 val = test_bit(slab_index(p, s, addr), map) ?
SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
@@ -4406,18 +4425,13 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
if (!check_object(s, page, p, val))
break;
}
-}
-
-static void validate_slab_slab(struct kmem_cache *s, struct page *page,
- unsigned long *map)
-{
- slab_lock(page);
- validate_slab(s, page, map);
+ put_map(map);
+unlock:
slab_unlock(page);
}
static int validate_slab_node(struct kmem_cache *s,
- struct kmem_cache_node *n, unsigned long *map)
+ struct kmem_cache_node *n)
{
unsigned long count = 0;
struct page *page;
@@ -4426,7 +4440,7 @@ static int validate_slab_node(struct kmem_cache *s,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list) {
- validate_slab_slab(s, page, map);
+ validate_slab(s, page);
count++;
}
if (count != n->nr_partial)
@@ -4437,7 +4451,7 @@ static int validate_slab_node(struct kmem_cache *s,
goto out;
list_for_each_entry(page, &n->full, slab_list) {
- validate_slab_slab(s, page, map);
+ validate_slab(s, page);
count++;
}
if (count != atomic_long_read(&n->nr_slabs))
@@ -4454,15 +4468,11 @@ static long validate_slab_cache(struct kmem_cache *s)
int node;
unsigned long count = 0;
struct kmem_cache_node *n;
- unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-
- if (!map)
- return -ENOMEM;
flush_all(s);
for_each_kmem_cache_node(s, node, n)
- count += validate_slab_node(s, n, map);
- bitmap_free(map);
+ count += validate_slab_node(s, n);
+
return count;
}
/*
@@ -4592,18 +4602,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
- struct page *page, enum track_item alloc,
- unsigned long *map)
+ struct page *page, enum track_item alloc)
{
void *addr = page_address(page);
void *p;
+ unsigned long *map;
- bitmap_zero(map, page->objects);
- get_map(s, page, map);
-
+ map = get_map(s, page);
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
add_location(t, s, get_track(s, p, alloc));
+ put_map(map);
}
static int list_locations(struct kmem_cache *s, char *buf,
@@ -4614,11 +4623,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
struct loc_track t = { 0, 0, NULL };
int node;
struct kmem_cache_node *n;
- unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
- if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
- GFP_KERNEL)) {
- bitmap_free(map);
+ if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+ GFP_KERNEL)) {
return sprintf(buf, "Out of memory\n");
}
/* Push back cpu slabs */
@@ -4633,9 +4640,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
- process_slab(&t, s, page, alloc, map);
+ process_slab(&t, s, page, alloc);
list_for_each_entry(page, &n->full, slab_list)
- process_slab(&t, s, page, alloc, map);
+ process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
@@ -4684,7 +4691,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
}
free_loc_track(&t);
- bitmap_free(map);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
--
2.23.0.162.g0b9fbb3734-goog