From: Andrew Morton <akpm@linux-foundation.org>
To: vbabka@suse.cz,oliver.sang@intel.com,42.hyeyoo@gmail.com,elver@google.com,akpm@linux-foundation.org,patches@lists.linux.dev,linux-mm@kvack.org,mm-commits@vger.kernel.org,torvalds@linux-foundation.org,akpm@linux-foundation.org
Subject: [patch 06/14] mm, kfence: support kmem_dump_obj() for KFENCE objects
Date: Thu, 14 Apr 2022 19:13:40 -0700 [thread overview]
Message-ID: <20220415021340.D03BDC385A1@smtp.kernel.org> (raw)
In-Reply-To: <20220414191240.9f86d15a3e3afd848a9839a6@linux-foundation.org>
From: Marco Elver <elver@google.com>
Subject: mm, kfence: support kmem_dump_obj() for KFENCE objects
Calling kmem_obj_info() via kmem_dump_obj() on KFENCE objects has been
producing garbage data due to the object not actually being maintained by
SLAB or SLUB.
Fix this by implementing __kfence_obj_info() that copies relevant
information to struct kmem_obj_info when the object was allocated by
KFENCE; this is called by a common kmem_obj_info(), which also calls the
slab/slub/slob specific variant now called __kmem_obj_info().
For completeness, kmem_dump_obj() now displays if the object was allocated
by KFENCE.
Link: https://lore.kernel.org/all/20220323090520.GG16885@xsang-OptiPlex-9020/
Link: https://lkml.kernel.org/r/20220406131558.3558585-1-elver@google.com
Fixes: b89fb5ef0ce6 ("mm, kfence: insert KFENCE hooks for SLUB")
Fixes: d3fb45f370d9 ("mm, kfence: insert KFENCE hooks for SLAB")
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz> [slab]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/kfence.h | 24 +++++++++++++++++++
mm/kfence/core.c | 21 -----------------
mm/kfence/kfence.h | 21 +++++++++++++++++
mm/kfence/report.c | 47 +++++++++++++++++++++++++++++++++++++++
mm/slab.c | 2 -
mm/slab.h | 2 -
mm/slab_common.c | 9 +++++++
mm/slob.c | 2 -
mm/slub.c | 2 -
9 files changed, 105 insertions(+), 25 deletions(-)
--- a/include/linux/kfence.h~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/include/linux/kfence.h
@@ -204,6 +204,22 @@ static __always_inline __must_check bool
*/
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
#else /* CONFIG_KFENCE */
static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_h
return false;
}
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
#endif
#endif /* _LINUX_KFENCE_H */
--- a/mm/kfence/core.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/kfence/core.c
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned lo
return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
- long index;
-
- /* The checks do not affect performance; only called from slow-paths. */
-
- if (!is_kfence_address((void *)addr))
- return NULL;
-
- /*
- * May be an invalid index if called with an address at the edge of
- * __kfence_pool, in which case we would report an "invalid access"
- * error.
- */
- index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
- if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
- return NULL;
-
- return &kfence_metadata[index];
-}
-
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
--- a/mm/kfence/kfence.h~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/kfence/kfence.h
@@ -96,6 +96,27 @@ struct kfence_metadata {
extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+ long index;
+
+ /* The checks do not affect performance; only called from slow-paths. */
+
+ if (!is_kfence_address((void *)addr))
+ return NULL;
+
+ /*
+ * May be an invalid index if called with an address at the edge of
+ * __kfence_pool, in which case we would report an "invalid access"
+ * error.
+ */
+ index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+ if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+ return NULL;
+
+ return &kfence_metadata[index];
+}
+
/* KFENCE error types for report generation. */
enum kfence_error_type {
KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */
--- a/mm/kfence/report.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/kfence/report.c
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long a
/* We encountered a memory safety error, taint the kernel! */
add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+ int i, j;
+
+ i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+ for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+ kp_stack[j] = (void *)track->stack_entries[i];
+ if (j < KS_ADDRS_COUNT)
+ kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+ unsigned long flags;
+
+ if (!meta)
+ return false;
+
+ /*
+ * If state is UNUSED at least show the pointer requested; the rest
+ * would be garbage data.
+ */
+ kpp->kp_ptr = object;
+
+ /* Requesting info on a never-used object is almost certainly a bug. */
+ if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+ return true;
+
+ raw_spin_lock_irqsave(&meta->lock, flags);
+
+ kpp->kp_slab = slab;
+ kpp->kp_slab_cache = meta->cache;
+ kpp->kp_objp = (void *)meta->addr;
+ kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+ if (meta->state == KFENCE_OBJECT_FREED)
+ kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+ /* get_stack_skipnr() ensures the first entry is outside allocator. */
+ kpp->kp_ret = kpp->kp_stack[0];
+
+ raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+ return true;
+}
+#endif
--- a/mm/slab.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_calle
#endif /* CONFIG_NUMA */
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
struct kmem_cache *cachep;
unsigned int objnr;
--- a/mm/slab_common.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/slab_common.c
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ if (__kfence_obj_info(kpp, object, slab))
+ return;
+ __kmem_obj_info(kpp, object, slab);
+}
+
/**
* kmem_dump_obj - Print available slab provenance information
* @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
else
pr_cont(" slab%s", cp);
+ if (is_kfence_address(object))
+ pr_cont(" (kfence)");
if (kp.kp_objp)
pr_cont(" start %px", kp.kp_objp);
if (kp.kp_data_offset)
--- a/mm/slab.h~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
void *kp_stack[KS_ADDRS_COUNT];
void *kp_free_stack[KS_ADDRS_COUNT];
};
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
--- a/mm/slob.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/slob.c
@@ -463,7 +463,7 @@ out:
}
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
kpp->kp_ptr = object;
kpp->kp_slab = slab;
--- a/mm/slub.c~mm-kfence-support-kmem_dump_obj-for-kfence-objects
+++ a/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_ca
}
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
void *base;
int __maybe_unused i;
_
next prev parent reply other threads:[~2022-04-15 2:13 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-15 2:12 incoming Andrew Morton
2022-04-15 2:13 ` [patch 01/14] MAINTAINERS: Broadcom internal lists aren't maintainers Andrew Morton
2022-04-15 2:13 ` [patch 02/14] tmpfs: fix regressions from wider use of ZERO_PAGE Andrew Morton
2022-04-15 22:10 ` Linus Torvalds
2022-04-15 22:21 ` Matthew Wilcox
2022-04-15 22:41 ` Hugh Dickins
[not found] ` <Ylpj9of+CP4ipDtm@zn.tnic>
2022-04-16 14:07 ` Mark Hemment
[not found] ` <Ylr8rR+LHQ1uGL47@zn.tnic>
2022-04-16 17:42 ` Linus Torvalds
[not found] ` <Ylsx5PwyUPOainHa@zn.tnic>
[not found] ` <YlxtTNFP58TcUHZQ@zn.tnic>
2022-04-17 20:56 ` Linus Torvalds
[not found] ` <Yl06PWVgeZplboXV@zn.tnic>
2022-04-18 17:10 ` Linus Torvalds
[not found] ` <Yl5+DCfQmG5C3BHf@zn.tnic>
2022-04-19 16:41 ` Linus Torvalds
[not found] ` <Yl7146AZDgfLviVv@zn.tnic>
[not found] ` <YmFy8DEqvX4FlnuB@zn.tnic>
[not found] ` <CAHk-=wgf2C9nFiC+3UFG4k7XVTQq5aV6fasSYuT_nQeo_Yew6A@mail.gmail.com>
2022-04-21 17:22 ` Linus Torvalds
[not found] ` <YmWm5AXdwgwu57KZ@zn.tnic>
2022-04-24 19:54 ` Linus Torvalds
2022-04-24 20:24 ` Linus Torvalds
[not found] ` <YmiK7Bos+zLAvL0t@zn.tnic>
2022-04-27 1:29 ` Linus Torvalds
[not found] ` <YmkdxaKdc2w/3I7o@zn.tnic>
2022-04-27 16:00 ` Linus Torvalds
[not found] ` <YnLMSWbz6BNfsBME@zn.tnic>
2022-05-04 19:22 ` Linus Torvalds
[not found] ` <YnLfl6lupN2nq7+t@zn.tnic>
2022-05-04 20:40 ` Linus Torvalds
[not found] ` <YnLplKy0Y66SsvQw@zn.tnic>
2022-05-04 21:09 ` Linus Torvalds
[not found] ` <Ynow8F3G8Kl6V3gu@zn.tnic>
2022-05-10 17:17 ` clear_user (was: [patch 02/14] tmpfs: fix regressions from wider use of ZERO_PAGE) Linus Torvalds
2022-05-10 17:28 ` Linus Torvalds
[not found] ` <YnqqhmYv75p+xl73@zn.tnic>
[not found] ` <Ynq1nVpu1xCpjnXm@zn.tnic>
2022-05-24 12:32 ` [PATCH] x86/clear_user: Make it faster Borislav Petkov
2022-05-24 16:51 ` Linus Torvalds
2022-05-24 17:30 ` Borislav Petkov
2022-05-25 12:11 ` Mark Hemment
2022-05-27 11:28 ` Borislav Petkov
2022-05-27 11:10 ` Ingo Molnar
2022-06-22 14:21 ` Borislav Petkov
2022-06-22 15:06 ` Linus Torvalds
2022-06-22 20:14 ` Borislav Petkov
2022-06-22 21:07 ` Linus Torvalds
2022-06-23 9:41 ` Borislav Petkov
2022-07-05 17:01 ` [PATCH -final] " Borislav Petkov
2022-07-06 9:24 ` Alexey Dobriyan
2022-07-11 10:33 ` Borislav Petkov
2022-07-12 12:32 ` Alexey Dobriyan
2022-08-06 12:49 ` Borislav Petkov
2022-04-15 2:13 ` [patch 03/14] mm/secretmem: fix panic when growing a memfd_secret Andrew Morton
2022-04-15 2:13 ` [patch 04/14] irq_work: use kasan_record_aux_stack_noalloc() record callstack Andrew Morton
2022-04-15 2:13 ` [patch 05/14] kasan: fix hw tags enablement when KUNIT tests are disabled Andrew Morton
2022-04-15 2:13 ` Andrew Morton [this message]
2022-04-15 2:13 ` [patch 08/14] mm: fix unexpected zeroed page mapping with zram swap Andrew Morton
2022-04-15 2:13 ` [patch 09/14] mm: compaction: fix compiler warning when CONFIG_COMPACTION=n Andrew Morton
2022-04-15 2:13 ` [patch 10/14] hugetlb: do not demote poisoned hugetlb pages Andrew Morton
2022-04-15 2:13 ` [patch 11/14] revert "fs/binfmt_elf: fix PT_LOAD p_align values for loaders" Andrew Morton
2022-04-15 2:13 ` [patch 12/14] revert "fs/binfmt_elf: use PT_LOAD p_align values for static PIE" Andrew Morton
2022-04-15 2:14 ` [patch 13/14] mm/vmalloc: fix spinning drain_vmap_work after reading from /proc/vmcore Andrew Morton
2022-04-15 2:14 ` [patch 14/14] mm: kmemleak: take a full lowmem check in kmemleak_*_phys() Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220415021340.D03BDC385A1@smtp.kernel.org \
--to=akpm@linux-foundation.org \
--cc=42.hyeyoo@gmail.com \
--cc=elver@google.com \
--cc=linux-mm@kvack.org \
--cc=mm-commits@vger.kernel.org \
--cc=oliver.sang@intel.com \
--cc=patches@lists.linux.dev \
--cc=torvalds@linux-foundation.org \
--cc=vbabka@suse.cz \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).