From: andrey.konovalov@linux.dev
To: Marco Elver <elver@google.com>, Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Andrey Ryabinin <ryabinin.a.a@gmail.com>,
	kasan-dev@googlegroups.com, Peter Collingbourne <pcc@google.com>,
	Evgenii Stepanov <eugenis@google.com>,
	Florian Mayer <fmayer@google.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH mm v2 12/33] kasan: introduce kasan_init_cache_meta
Date: Tue, 19 Jul 2022 02:09:52 +0200
Message-ID: <7ae0695bcf60921a040f6bc295876444f5c3cef1.1658189199.git.andreyknvl@google.com>
In-Reply-To: <cover.1658189199.git.andreyknvl@google.com>

From: Andrey Konovalov <andreyknvl@google.com>

Add a kasan_init_cache_meta() helper that initializes metadata-related
cache parameters and use this helper in the common KASAN code.

Put the implementation of this new helper into generic.c, as only the
Generic mode uses per-object metadata.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
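Note (not for the commit log): a minimal userspace sketch of the adaptive
redzone policy this patch moves into generic.c. The thresholds are copied
verbatim from optimal_redzone(); the main() harness and the sample sizes
are illustrative only and not part of the patch:

	#include <stdio.h>

	/* Thresholds match optimal_redzone() in mm/kasan/generic.c. */
	static unsigned int optimal_redzone(unsigned int object_size)
	{
		return
			object_size <= 64        - 16   ? 16 :
			object_size <= 128       - 32   ? 32 :
			object_size <= 512       - 64   ? 64 :
			object_size <= 4096      - 128  ? 128 :
			object_size <= (1 << 14) - 256  ? 256 :
			object_size <= (1 << 15) - 512  ? 512 :
			object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	}

	int main(void)
	{
		/* Sample object sizes, chosen for illustration only. */
		unsigned int sizes[] = { 16, 64, 128, 512, 4096, 65536 };
		unsigned int i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("object_size=%-6u -> redzone=%u\n",
			       sizes[i], optimal_redzone(sizes[i]));
		return 0;
	}

This prints, e.g., a 16-byte redzone for a 16-byte object and a 2048-byte
redzone for a 64 KiB object: larger allocations get larger redzones.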
 mm/kasan/common.c  | 80 ++--------------------------------------------
 mm/kasan/generic.c | 79 +++++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/kasan.h   |  2 ++
 3 files changed, 83 insertions(+), 78 deletions(-)
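Also not for the commit log: a rough sketch of the object layout that
kasan_init_cache_meta() produces when both metas end up in the redzone.
This is not guaranteed; either meta is dropped when it would push *size
past KMALLOC_MAX_SIZE, and free meta is stored in the object itself
unless the RCU/ctor/too-small checks below apply:

	/*
	 *  0                    alloc_meta_offset    free_meta_offset
	 *  |<-- object data  -->|<-- alloc meta   -->|<-- free meta -->| pad |
	 *                                                                    ^
	 * *size is then raised to at least object_size +
	 * optimal_redzone(object_size), capped at KMALLOC_MAX_SIZE.
	 */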

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d2ec4e6af675..83a04834746f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -117,28 +117,9 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 			     KASAN_PAGE_FREE, init);
 }
 
-/*
- * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
- * For larger allocations larger redzones are used.
- */
-static inline unsigned int optimal_redzone(unsigned int object_size)
-{
-	return
-		object_size <= 64        - 16   ? 16 :
-		object_size <= 128       - 32   ? 32 :
-		object_size <= 512       - 64   ? 64 :
-		object_size <= 4096      - 128  ? 128 :
-		object_size <= (1 << 14) - 256  ? 256 :
-		object_size <= (1 << 15) - 512  ? 512 :
-		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-}
-
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			  slab_flags_t *flags)
 {
-	unsigned int ok_size;
-	unsigned int optimal_size;
-
 	/*
 	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
 	 * KASAN. Currently this flag is used in two places:
@@ -148,65 +129,8 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 	 */
 	*flags |= SLAB_KASAN;
 
-	if (!kasan_requires_meta())
-		return;
-
-	ok_size = *size;
-
-	/* Add alloc meta into redzone. */
-	cache->kasan_info.alloc_meta_offset = *size;
-	*size += sizeof(struct kasan_alloc_meta);
-
-	/*
-	 * If alloc meta doesn't fit, don't add it.
-	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
-	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
-	 * larger sizes.
-	 */
-	if (*size > KMALLOC_MAX_SIZE) {
-		cache->kasan_info.alloc_meta_offset = 0;
-		*size = ok_size;
-		/* Continue, since free meta might still fit. */
-	}
-
-	/* Only the generic mode uses free meta or flexible redzones. */
-	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
-		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
-		return;
-	}
-
-	/*
-	 * Add free meta into redzone when it's not possible to store
-	 * it in the object. This is the case when:
-	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
-	 *    be touched after it was freed, or
-	 * 2. Object has a constructor, which means it's expected to
-	 *    retain its content until the next allocation, or
-	 * 3. Object is too small.
-	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
-	 */
-	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
-		ok_size = *size;
-
-		cache->kasan_info.free_meta_offset = *size;
-		*size += sizeof(struct kasan_free_meta);
-
-		/* If free meta doesn't fit, don't add it. */
-		if (*size > KMALLOC_MAX_SIZE) {
-			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
-			*size = ok_size;
-		}
-	}
-
-	/* Calculate size with optimal redzone. */
-	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
-	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
-	if (optimal_size > KMALLOC_MAX_SIZE)
-		optimal_size = KMALLOC_MAX_SIZE;
-	/* Use optimal size if the size with added metas is not large enough. */
-	if (*size < optimal_size)
-		*size = optimal_size;
+	if (kasan_requires_meta())
+		kasan_init_cache_meta(cache, size);
 }
 
 void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index fa654cb96a0d..73aea784040a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -328,6 +328,85 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static inline unsigned int optimal_redzone(unsigned int object_size)
+{
+	return
+		object_size <= 64        - 16   ? 16 :
+		object_size <= 128       - 32   ? 32 :
+		object_size <= 512       - 64   ? 64 :
+		object_size <= 4096      - 128  ? 128 :
+		object_size <= (1 << 14) - 256  ? 256 :
+		object_size <= (1 << 15) - 512  ? 512 :
+		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+}
+
+void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size)
+{
+	unsigned int ok_size;
+	unsigned int optimal_size;
+
+	ok_size = *size;
+
+	/* Add alloc meta into redzone. */
+	cache->kasan_info.alloc_meta_offset = *size;
+	*size += sizeof(struct kasan_alloc_meta);
+
+	/*
+	 * If alloc meta doesn't fit, don't add it.
+	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+	 * larger sizes.
+	 */
+	if (*size > KMALLOC_MAX_SIZE) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		*size = ok_size;
+		/* Continue, since free meta might still fit. */
+	}
+
+	/* Only the generic mode uses free meta or flexible redzones. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+		return;
+	}
+
+	/*
+	 * Add free meta into redzone when it's not possible to store
+	 * it in the object. This is the case when:
+	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+	 *    be touched after it was freed, or
+	 * 2. Object has a constructor, which means it's expected to
+	 *    retain its content until the next allocation, or
+	 * 3. Object is too small.
+	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+	 */
+	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+	    cache->object_size < sizeof(struct kasan_free_meta)) {
+		ok_size = *size;
+
+		cache->kasan_info.free_meta_offset = *size;
+		*size += sizeof(struct kasan_free_meta);
+
+		/* If free meta doesn't fit, don't add it. */
+		if (*size > KMALLOC_MAX_SIZE) {
+			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+			*size = ok_size;
+		}
+	}
+
+	/* Calculate size with optimal redzone. */
+	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+	if (optimal_size > KMALLOC_MAX_SIZE)
+		optimal_size = KMALLOC_MAX_SIZE;
+	/* Use optimal size if the size with added metas is not large enough. */
+	if (*size < optimal_size)
+		*size = optimal_size;
+}
+
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object)
 {
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1736abd661b6..6da35370ba37 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -297,12 +297,14 @@ struct page *kasan_addr_to_page(const void *addr);
 struct slab *kasan_addr_to_slab(const void *addr);
 
 #ifdef CONFIG_KASAN_GENERIC
+void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 						const void *object);
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 						const void *object);
 #else
+static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
-- 
2.25.1

