From: Matteo Rizzo <matteorizzo@google.com>
To: cl@linux.com, penberg@kernel.org, rientjes@google.com,
	iamjoonsoo.kim@lge.com, akpm@linux-foundation.org,
	vbabka@suse.cz, roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
	keescook@chromium.org, linux-kernel@vger.kernel.org,
	linux-doc@vger.kernel.org, linux-mm@kvack.org,
	linux-hardening@vger.kernel.org, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
	x86@kernel.org, hpa@zytor.com, corbet@lwn.net, luto@kernel.org,
	peterz@infradead.org
Cc: jannh@google.com, matteorizzo@google.com, evn@google.com,
	poprdi@google.com, jordyzomer@google.com
Subject: [RFC PATCH 13/14] mm/slub: sanity-check freepointers
Date: Fri, 15 Sep 2023 10:59:32 +0000	[thread overview]
Message-ID: <20230915105933.495735-14-matteorizzo@google.com> (raw)
In-Reply-To: <20230915105933.495735-1-matteorizzo@google.com>

From: Jann Horn <jannh@google.com>

Sanity-check that:
 - non-NULL freepointers point into the slab
 - freepointers look plausibly aligned
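
As an aid to review, here is a minimal userspace sketch of the new check
(illustrative only, not kernel code). slab_align_mask() and
freeptr_looks_corrupted() are hypothetical stand-ins for the align_mask
setup in get_free_slab() and the check in freelist_pointer_corrupted();
the 4K page size, order-0 slab, slab base address and 96-byte object size
are assumptions made up for the example.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define EXAMPLE_PAGE_SIZE 4096UL

  /* Bits that must match the start-of-slab address for an in-slab pointer. */
  static unsigned long slab_align_mask(unsigned int order, size_t object_size)
  {
  	/* Everything above the slab size must equal the slab base address. */
  	unsigned long mask = ~((EXAMPLE_PAGE_SIZE << order) - 1);

  	/* Bits below the lowest set bit of the object size must be zero. */
  	mask |= (1UL << (__builtin_ffsl((long)object_size) - 1)) - 1;
  	return mask;
  }

  static bool freeptr_looks_corrupted(unsigned long slab_base,
  				      unsigned long decoded,
  				      unsigned long align_mask)
  {
  	if (!decoded)	/* a decoded NULL is always accepted, as in the patch */
  		return false;
  	return (decoded & align_mask) != slab_base;
  }

  int main(void)
  {
  	unsigned long base = 0x100000UL;	/* pretend order-0 slab base */
  	unsigned long mask = slab_align_mask(0, 96);

  	/* offset 96: in the slab and a multiple of 32 -> accepted (prints 0) */
  	printf("%d\n", freeptr_looks_corrupted(base, base + 96, mask));
  	/* offset 40: in the slab but misaligned -> flagged (prints 1) */
  	printf("%d\n", freeptr_looks_corrupted(base, base + 40, mask));
  	/* past the end of the slab -> flagged (prints 1) */
  	printf("%d\n",
  	       freeptr_looks_corrupted(base, base + EXAMPLE_PAGE_SIZE + 32, mask));
  	return 0;
  }

Built with a plain cc, this prints 0, 1, 1: an in-slab, suitably aligned
pointer is accepted, while misaligned or out-of-slab pointers are flagged,
mirroring the cheap "low bits of the size must also be clear in the
pointer" trade-off described in the comments below.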

Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
 lib/slub_kunit.c |  4 ++++
 mm/slab.h        |  8 +++++++
 mm/slub.c        | 57 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)

diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b08fa..acf8600bd1fd 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -45,6 +45,10 @@ static void test_clobber_zone(struct kunit *test)
 #ifndef CONFIG_KASAN
 static void test_next_pointer(struct kunit *test)
 {
+	if (IS_ENABLED(CONFIG_SLAB_VIRTUAL))
+		kunit_skip(test,
+			"incompatible with freepointer corruption detection in CONFIG_SLAB_VIRTUAL");
+
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
 							64, SLAB_POISON);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
diff --git a/mm/slab.h b/mm/slab.h
index 460c802924bd..8d10a011bdf0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -79,6 +79,14 @@ struct slab {
 
 	struct list_head flush_list_elem;
 
+	/*
+	 * Not in kmem_cache because it depends on whether the allocation is
+	 * normal order or fallback order.
+	 * An alternative might be to over-allocate virtual memory for
+	 * fallback-order pages.
+	 */
+	unsigned long align_mask;
+
 	/* Replaces the page lock */
 	spinlock_t slab_lock;
 
diff --git a/mm/slub.c b/mm/slub.c
index 0f7f5bf0b174..57474c8a6569 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -392,6 +392,44 @@ static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
 	return (freeptr_t){.v = encoded};
 }
 
+/*
+ * Does some validation of freelist pointers. Without SLAB_VIRTUAL this is
+ * currently a no-op.
+ */
+static inline bool freelist_pointer_corrupted(struct slab *slab, freeptr_t ptr,
+	void *decoded)
+{
+#ifdef CONFIG_SLAB_VIRTUAL
+	/*
+	 * If the freepointer decodes to 0, use 0 as the slab_base so that
+	 * the check below always passes ((0 & slab->align_mask) == 0).
+	 */
+	unsigned long slab_base = decoded ? (unsigned long)slab_to_virt(slab)
+		: 0;
+
+	/*
+	 * This verifies that the SLUB freepointer does not point outside the
+	 * slab. Since at that point we can basically do it for free, it also
+	 * checks that the pointer alignment looks vaguely sane.
+	 * However, we probably don't want the cost of a proper division here,
+	 * so instead we just do a cheap check whether the bottom bits that are
+	 * clear in the size are also clear in the pointer.
+	 * So for kmalloc-32, it does a perfect alignment check, but for
+	 * kmalloc-192, it just checks that the pointer is a multiple of 32.
+	 * This should probably be reconsidered - is this a good tradeoff, or
+	 * should that part be thrown out, or do we want a proper accurate
+	 * alignment check (and can we make it work with acceptable performance
+	 * cost compared to the security improvement - probably not)?
+	 */
+	return CHECK_DATA_CORRUPTION(
+		((unsigned long)decoded & slab->align_mask) != slab_base,
+		"bad freeptr (encoded %lx, ptr %p, base %lx, mask %lx",
+		ptr.v, decoded, slab_base, slab->align_mask);
+#else
+	return false;
+#endif
+}
+
 static inline void *freelist_ptr_decode(const struct kmem_cache *s,
 					freeptr_t ptr, unsigned long ptr_addr,
 					struct slab *slab)
@@ -403,6 +441,10 @@ static inline void *freelist_ptr_decode(const struct kmem_cache *s,
 #else
 	decoded = (void *)ptr.v;
 #endif
+
+	if (unlikely(freelist_pointer_corrupted(slab, ptr, decoded)))
+		return NULL;
+
 	return decoded;
 }
 
@@ -2122,6 +2164,21 @@ static struct slab *get_free_slab(struct kmem_cache *s,
 	if (slab == NULL)
 		return NULL;
 
+	/*
+	 * Bits that must be equal to the start-of-slab address for all
+	 * objects inside the slab.
+	 * For compatibility with pointer tagging (like in HWASAN), this would
+	 * need to clear the pointer tag bits from the mask.
+	 */
+	slab->align_mask = ~((PAGE_SIZE << oo_order(oo)) - 1);
+
+	/*
+	 * Object alignment bits: these must be zero in every object address,
+	 * matching the (also zero) low bits of the start-of-slab address.
+	 */
+	if (s->red_left_pad == 0)
+		slab->align_mask |= (1 << (ffs(s->size) - 1)) - 1;
+
 	return slab;
 }
 
-- 
2.42.0.459.ge4e396fd5e-goog


Thread overview: 48+ messages
2023-09-15 10:59 [RFC PATCH 00/14] Prevent cross-cache attacks in the SLUB allocator Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 01/14] mm/slub: don't try to dereference invalid freepointers Matteo Rizzo
2023-09-15 20:50   ` Kees Cook
2023-09-30 11:04   ` Hyeonggon Yoo
2023-09-15 10:59 ` [RFC PATCH 02/14] mm/slub: add is_slab_addr/is_slab_page helpers Matteo Rizzo
2023-09-15 20:55   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 03/14] mm/slub: move kmem_cache_order_objects to slab.h Matteo Rizzo
2023-09-15 20:56   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 04/14] mm: use virt_to_slab instead of folio_slab Matteo Rizzo
2023-09-15 20:59   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 05/14] mm/slub: create folio_set/clear_slab helpers Matteo Rizzo
2023-09-15 21:02   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 06/14] mm/slub: pass additional args to alloc_slab_page Matteo Rizzo
2023-09-15 21:03   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 07/14] mm/slub: pass slab pointer to the freeptr decode helper Matteo Rizzo
2023-09-15 21:06   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 08/14] security: introduce CONFIG_SLAB_VIRTUAL Matteo Rizzo
2023-09-15 21:07   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 09/14] mm/slub: add the slab freelists to kmem_cache Matteo Rizzo
2023-09-15 21:08   ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 10/14] x86: Create virtual memory region for SLUB Matteo Rizzo
2023-09-15 21:13   ` Kees Cook
2023-09-15 21:49     ` Dave Hansen
2023-09-18  8:54       ` Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 11/14] mm/slub: allocate slabs from virtual memory Matteo Rizzo
2023-09-15 21:22   ` Kees Cook
2023-09-15 21:57   ` Dave Hansen
2023-10-11  9:17     ` Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 12/14] mm/slub: introduce the deallocated_pages sysfs attribute Matteo Rizzo
2023-09-15 21:23   ` Kees Cook
2023-09-15 10:59 ` Matteo Rizzo [this message]
2023-09-15 21:26   ` [RFC PATCH 13/14] mm/slub: sanity-check freepointers Kees Cook
2023-09-15 10:59 ` [RFC PATCH 14/14] security: add documentation for SLAB_VIRTUAL Matteo Rizzo
2023-09-15 21:34   ` Kees Cook
2023-09-20  9:04   ` Vlastimil Babka
2023-09-15 15:19 ` [RFC PATCH 00/14] Prevent cross-cache attacks in the SLUB allocator Dave Hansen
2023-09-15 16:30   ` Lameter, Christopher
2023-09-18 12:08     ` Matteo Rizzo
2023-09-18 17:39       ` Ingo Molnar
2023-09-18 18:05         ` Linus Torvalds
2023-09-19 15:48           ` Matteo Rizzo
2023-09-19 16:02             ` Dave Hansen
2023-09-19 17:56               ` Kees Cook
2023-09-19 18:49             ` Linus Torvalds
2023-09-19 13:42         ` Matteo Rizzo
2023-09-19 15:56           ` Dave Hansen
2023-09-20  7:44           ` Ingo Molnar
2023-09-20  8:49       ` Vlastimil Babka
