RCU Archive on lore.kernel.org
From: paulmck@kernel.org
To: rcu@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, kernel-team@fb.com,
	mingo@kernel.org, jiangshanlai@gmail.com,
	akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
	josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
	rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
	fweisbec@gmail.com, oleg@redhat.com, joel@joelfernandes.org,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Christoph Lameter <cl@linux.com>,
	Pekka Enberg <penberg@kernel.org>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	linux-mm@kvack.org
Subject: [PATCH sl-b 4/6] mm: Create kmem_last_alloc_stack() to provide stack trace in slub
Date: Fri,  4 Dec 2020 16:40:55 -0800
Message-ID: <20201205004057.32199-4-paulmck@kernel.org>
In-Reply-To: <20201205004022.GA31166@paulmck-ThinkPad-P72>

From: "Paul E. McKenney" <paulmck@kernel.org>

In some cases, the allocator return address is in a common function,
so that more information is needed.  For example, a percpu_ref
reference-count underflow has access only to a data structure that is
allocated in percpu_ref_init().  In this case, the return address from
the allocator provides no additional information.

This commit therefore creates a kmem_last_alloc_stack() function and
adds stackp and nstackp parameters to kmem_cache_last_alloc(), allowing
CONFIG_STACKTRACE=y slub stack traces to be provided to the caller.

Please note that stack traces cannot be provided unless they are
collected.  Collecting stack traces requires that the kernel: (1) Use
the slub allocator, (2) Be built with CONFIG_STACKTRACE=y (which is the
case when ftrace is configured), and (3) Have slub debugging enabled
one way or another, for example, by booting with the "slub_debug=U"
kernel boot parameter.
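
As a rough illustration (hypothetical, not part of this patch), a
debugging-aid caller might use the new interface as follows, given a
slub kernel built with CONFIG_STACKTRACE=y and booted with
"slub_debug=U".  The dump_last_alloc() name and MAX_STACK constant are
made up for this sketch:

	#include <linux/err.h>
	#include <linux/printk.h>
	#include <linux/slab.h>

	#define MAX_STACK 8

	static void dump_last_alloc(void *object)
	{
		void *stack[MAX_STACK];
		void *ret;
		int i;

		ret = kmem_last_alloc_stack(object, stack, MAX_STACK);
		if (IS_ERR(ret)) {
			pr_info("last alloc unavailable: %s\n",
				kmem_last_alloc_errstring(ret));
			return;
		}
		if (!ret)
			return;	/* No allocation information recorded. */
		pr_info("allocated at %pS\n", ret);
		/* Stack is NULL-terminated when fewer than MAX_STACK entries. */
		for (i = 0; i < MAX_STACK && stack[i]; i++)
			pr_info("  %pS\n", stack[i]);
	}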

Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
Reported-by: Andrii Nakryiko <andrii@kernel.org>
[ paulmck: Move slab definition per Stephen Rothwell and kbuild test robot. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/slab.h |  3 ++-
 mm/slab.c            | 40 +++++++++++++++++++++-------------------
 mm/slab_common.c     | 39 ++++++++++++++++++++++++++++++++-------
 mm/slob.c            |  4 +++-
 mm/slub.c            | 14 +++++++++++++-
 5 files changed, 71 insertions(+), 29 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 031e630..bdedefd 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -195,8 +195,9 @@ void kfree(const void *);
 void kfree_sensitive(const void *);
 size_t __ksize(const void *);
 size_t ksize(const void *);
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object);
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp);
 void *kmem_last_alloc(void *object);
+void *kmem_last_alloc_stack(void *object, void **stackp, int nstackp);
 const char *kmem_last_alloc_errstring(void *lastalloc);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
diff --git a/mm/slab.c b/mm/slab.c
index 1f3b263..ae1a74c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3602,25 +3602,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
-void *kmem_cache_last_alloc(struct kmem_cache *cachep, void *object)
-{
-#ifdef DEBUG
-	unsigned int objnr;
-	void *objp;
-	struct page *page;
-
-	if (!(cachep->flags & SLAB_STORE_USER))
-		return ERR_PTR(-KMEM_LA_NO_DEBUG);
-	objp = object - obj_offset(cachep);
-	page = virt_to_head_page(objp);
-	objnr = obj_to_index(cachep, page, objp);
-	objp = index_to_obj(cachep, page, objnr);
-	return *dbg_userword(cachep, objp);
-#else
-	return NULL;
-#endif
-}
-
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
@@ -3652,6 +3633,27 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
+void *kmem_cache_last_alloc(struct kmem_cache *cachep, void *object, void **stackp, int nstackp)
+{
+#ifdef DEBUG
+	unsigned int objnr;
+	void *objp;
+	struct page *page;
+
+	if (!(cachep->flags & SLAB_STORE_USER))
+		return ERR_PTR(-KMEM_LA_NO_DEBUG);
+	objp = object - obj_offset(cachep);
+	page = virt_to_head_page(objp);
+	objnr = obj_to_index(cachep, page, objp);
+	objp = index_to_obj(cachep, page, objnr);
+	if (stackp && nstackp)
+		stackp[0] = NULL;
+	return *dbg_userword(cachep, objp);
+#else
+	return NULL;
+#endif
+}
+
 /**
  * __do_kmalloc - allocate memory
  * @size: how many bytes of memory are required.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8430a14..b70f357 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -560,14 +560,22 @@ const char *kmem_last_alloc_errstring(void *lastalloc)
 }
 EXPORT_SYMBOL_GPL(kmem_last_alloc_errstring);
 
-/*
+/**
+ * kmem_last_alloc_stack - Get return address and stack for last allocation
+ * @object: object for which to find last-allocation return address.
+ * @stackp: %NULL or pointer to location to place return-address stack.
+ * @nstackp: maximum number of return addresses that may be stored.
+ *
  * If the pointer references a slab-allocated object and if sufficient
- * debugging is enabled, return the returrn address for the corresponding
- * allocation.  Otherwise, return NULL.  Note that passing random pointers
- * to this function (including addresses of on-stack variables) is likely
- * to result in panics.
+ * debugging is enabled, return the return address for the corresponding
+ * allocation.  If stackp is non-%NULL in %CONFIG_STACKTRACE kernels running
+ * the slub allocator, also copy the return-address stack into @stackp,
+ * limited by @nstackp.  Otherwise, return %NULL or an appropriate error
+ * code using %ERR_PTR().
+ *
+ * Return: return address from last allocation, %NULL or negative error code.
  */
-void *kmem_last_alloc(void *object)
+void *kmem_last_alloc_stack(void *object, void **stackp, int nstackp)
 {
 	struct page *page;
 
@@ -576,7 +584,24 @@ void *kmem_last_alloc(void *object)
 	page = virt_to_head_page(object);
 	if (!PageSlab(page))
 		return ERR_PTR(-KMEM_LA_NO_SLAB);
-	return kmem_cache_last_alloc(page->slab_cache, object);
+	return kmem_cache_last_alloc(page->slab_cache, object, stackp, nstackp);
+}
+EXPORT_SYMBOL_GPL(kmem_last_alloc_stack);
+
+/**
+ * kmem_last_alloc - Get return address for last allocation
+ * @object: object for which to find last-allocation return address.
+ *
+ * If the pointer references a slab-allocated object and if sufficient
+ * debugging is enabled, return the return address for the corresponding
+ * allocation.  Otherwise, return %NULL or an appropriate error code using
+ * %ERR_PTR().
+ *
+ * Return: return address from last allocation, %NULL or negative error code.
+ */
+void *kmem_last_alloc(void *object)
+{
+	return kmem_last_alloc_stack(object, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(kmem_last_alloc);
 
diff --git a/mm/slob.c b/mm/slob.c
index e7d6b90..dab7f3b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -461,8 +461,10 @@ static void slob_free(void *block, int size)
 	spin_unlock_irqrestore(&slob_lock, flags);
 }
 
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp)
 {
+	if (stackp && nstackp)
+		stackp[0] = NULL;
 	return ERR_PTR(-KMEM_LA_SLOB);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 3ddf16a..a918b1d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3918,10 +3918,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 	return 0;
 }
 
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp)
 {
 #ifdef CONFIG_SLUB_DEBUG
 	void *base;
+	int i = 0;
 	unsigned int objnr;
 	void *objp;
 	struct page *page;
@@ -3938,6 +3939,17 @@ void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
 	if (objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size)
 		return ERR_PTR(-KMEM_LA_INCONSISTENT);
 	trackp = get_track(s, objp, TRACK_ALLOC);
+#ifdef CONFIG_STACKTRACE
+	if (stackp) {
+		for (; i < nstackp && i < TRACK_ADDRS_COUNT; i++) {
+			stackp[i] = (void *)trackp->addrs[i];
+			if (!stackp[i])
+				break;
+		}
+	}
+#endif
+	if (stackp && i < nstackp)
+		stackp[i] = NULL;
 	return (void *)trackp->addr;
 #else
 	return NULL;
-- 
2.9.5


