From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, andreyknvl@google.com,
	aryabinin@virtuozzo.com, Branislav.Rankov@arm.com,
	catalin.marinas@arm.com, dvyukov@google.com, elver@google.com,
	eugenis@google.com, glider@google.com, gor@linux.ibm.com,
	kevin.brodsky@arm.com, linux-mm@kvack.org,
	mm-commits@vger.kernel.org, torvalds@linux-foundation.org,
	Vincenzo.Frascino@arm.com, will.deacon@arm.com
Subject: [patch 66/78] kasan, mm: check kasan_enabled in annotations
Date: Fri, 18 Dec 2020 14:05:03 -0800
Message-ID: <20201218220503.bg9ovjDIo%akpm@linux-foundation.org>
In-Reply-To: <20201218140046.497484741326828e5b5d46ec@linux-foundation.org>

From: Andrey Konovalov <andreyknvl@google.com>
Subject: kasan, mm: check kasan_enabled in annotations

Declare the kasan_enabled static key in include/linux/kasan.h and in
include/linux/mm.h, and check it in all KASAN annotations. This avoids
any slowdown caused by function calls when KASAN is disabled.
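
To illustrate the pattern, here is a minimal userspace C sketch (not
part of the patch): a cheap inline guard in front of an out-of-line
implementation, so the disabled case costs only a well-predicted branch
and never makes a function call. The names feature_enabled, do_work and
__do_work are hypothetical stand-ins for kasan_enabled() and the
__kasan_*() pair; a real static key goes further and patches the branch
in the instruction stream at runtime instead of testing a variable.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kasan_flag_enabled static key (off by default). */
static bool feature_enabled;

/* Out-of-line implementation, reached only when the feature is on. */
static void __do_work(const char *what)
{
	printf("expensive checks for: %s\n", what);
}

/*
 * Inline wrapper, analogous to kasan_unpoison_range() and friends.
 * The kernel uses static_branch_likely() here, which patches the
 * branch at runtime; a plain load and test is the closest portable
 * analogue.
 */
static inline void do_work(const char *what)
{
	if (feature_enabled)
		__do_work(what);
}

int main(void)
{
	do_work("skipped");	/* disabled: callee is never entered */
	feature_enabled = true;
	do_work("executed");	/* enabled: __do_work() runs */
	return 0;
}

With a real static key the disabled path is cheaper still: the test
compiles down to a patched no-op rather than a load and a branch.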

Link: https://lkml.kernel.org/r/9f90e3c0aa840dbb4833367c2335193299f69023.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I2589451d3c96c97abbcbf714baabe6161c6f153e
Co-developed-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>
Signed-off-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/kasan.h |  217 ++++++++++++++++++++++++++++++----------
 include/linux/mm.h    |   22 ++--
 mm/kasan/common.c     |   56 +++++-----
 3 files changed, 212 insertions(+), 83 deletions(-)

--- a/include/linux/kasan.h~kasan-mm-check-kasan_enabled-in-annotations
+++ a/include/linux/kasan.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/static_key.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -75,54 +76,176 @@ static inline void kasan_disable_current
 
 #ifdef CONFIG_KASAN
 
-void kasan_unpoison_range(const void *address, size_t size);
-
-void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
-
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			slab_flags_t *flags);
-
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
-					const void *object);
-
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
-						gfp_t flags);
-void kasan_kfree_large(void *ptr, unsigned long ip);
-void kasan_poison_kfree(void *ptr, unsigned long ip);
-void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
-					size_t size, gfp_t flags);
-void * __must_check kasan_krealloc(const void *object, size_t new_size,
-					gfp_t flags);
-
-void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
-					gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-
 struct kasan_cache {
 	int alloc_meta_offset;
 	int free_meta_offset;
 };
 
-size_t kasan_metadata_size(struct kmem_cache *cache);
+#ifdef CONFIG_KASAN_HW_TAGS
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+static __always_inline bool kasan_enabled(void)
+{
+	return static_branch_likely(&kasan_flag_enabled);
+}
+#else
+static inline bool kasan_enabled(void)
+{
+	return true;
+}
+#endif
+
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_range(addr, size);
+}
+
+void __kasan_alloc_pages(struct page *page, unsigned int order);
+static __always_inline void kasan_alloc_pages(struct page *page,
+						unsigned int order)
+{
+	if (kasan_enabled())
+		__kasan_alloc_pages(page, order);
+}
+
+void __kasan_free_pages(struct page *page, unsigned int order);
+static __always_inline void kasan_free_pages(struct page *page,
+						unsigned int order)
+{
+	if (kasan_enabled())
+		__kasan_free_pages(page, order);
+}
+
+void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+				slab_flags_t *flags);
+static __always_inline void kasan_cache_create(struct kmem_cache *cache,
+				unsigned int *size, slab_flags_t *flags)
+{
+	if (kasan_enabled())
+		__kasan_cache_create(cache, size, flags);
+}
+
+size_t __kasan_metadata_size(struct kmem_cache *cache);
+static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	if (kasan_enabled())
+		return __kasan_metadata_size(cache);
+	return 0;
+}
+
+void __kasan_poison_slab(struct page *page);
+static __always_inline void kasan_poison_slab(struct page *page)
+{
+	if (kasan_enabled())
+		__kasan_poison_slab(page);
+}
+
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+							void *object)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_object_data(cache, object);
+}
+
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+							void *object)
+{
+	if (kasan_enabled())
+		__kasan_poison_object_data(cache, object);
+}
+
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+					  const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+				struct kmem_cache *cache, const void *object)
+{
+	if (kasan_enabled())
+		return __kasan_init_slab_obj(cache, object);
+	return (void *)object;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+						unsigned long ip)
+{
+	if (kasan_enabled())
+		return __kasan_slab_free(s, object, ip);
+	return false;
+}
+
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+				       void *object, gfp_t flags);
+static __always_inline void * __must_check kasan_slab_alloc(
+				struct kmem_cache *s, void *object, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_slab_alloc(s, object, flags);
+	return object;
+}
+
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+				    size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+				const void *object, size_t size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_kmalloc(s, object, size, flags);
+	return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+					  size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+						      size_t size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_kmalloc_large(ptr, size, flags);
+	return (void *)ptr;
+}
+
+void * __must_check __kasan_krealloc(const void *object,
+				     size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+						 size_t new_size, gfp_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_krealloc(object, new_size, flags);
+	return (void *)object;
+}
+
+void __kasan_poison_kfree(void *ptr, unsigned long ip);
+static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_poison_kfree(ptr, ip);
+}
+
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_kfree_large(ptr, ip);
+}
 
 bool kasan_save_enable_multi_shot(void);
 void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
+static inline bool kasan_enabled(void)
+{
+	return false;
+}
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
-
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
-
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
@@ -133,36 +256,32 @@ static inline void *kasan_init_slab_obj(
 {
 	return (void *)object;
 }
-
-static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+				   unsigned long ip)
 {
-	return ptr;
+	return false;
+}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+				   gfp_t flags)
+{
+	return object;
 }
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size, gfp_t flags)
 {
 	return (void *)object;
 }
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+	return (void *)ptr;
+}
 static inline void *kasan_krealloc(const void *object, size_t new_size,
 				 gfp_t flags)
 {
 	return (void *)object;
 }
-
-static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
-				   gfp_t flags)
-{
-	return object;
-}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-				   unsigned long ip)
-{
-	return false;
-}
-
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
+static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
 
--- a/include/linux/mm.h~kasan-mm-check-kasan_enabled-in-annotations
+++ a/include/linux/mm.h
@@ -31,6 +31,7 @@
 #include <linux/sizes.h>
 #include <linux/sched.h>
 #include <linux/pgtable.h>
+#include <linux/kasan.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1422,22 +1423,30 @@ static inline bool cpupid_match_pid(stru
 #endif /* CONFIG_NUMA_BALANCING */
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
 static inline u8 page_kasan_tag(const struct page *page)
 {
-	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+	if (kasan_enabled())
+		return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+	return 0xff;
 }
 
 static inline void page_kasan_tag_set(struct page *page, u8 tag)
 {
-	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
-	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+	if (kasan_enabled()) {
+		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+	}
 }
 
 static inline void page_kasan_tag_reset(struct page *page)
 {
-	page_kasan_tag_set(page, 0xff);
+	if (kasan_enabled())
+		page_kasan_tag_set(page, 0xff);
 }
-#else
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
 static inline u8 page_kasan_tag(const struct page *page)
 {
 	return 0xff;
@@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const st
 
 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
 static inline void page_kasan_tag_reset(struct page *page) { }
-#endif
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
 
 static inline struct zone *page_zone(const struct page *page)
 {
--- a/mm/kasan/common.c~kasan-mm-check-kasan_enabled-in-annotations
+++ a/mm/kasan/common.c
@@ -58,7 +58,7 @@ void kasan_disable_current(void)
 }
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-void kasan_unpoison_range(const void *address, size_t size)
+void __kasan_unpoison_range(const void *address, size_t size)
 {
 	unpoison_range(address, size);
 }
@@ -86,7 +86,7 @@ asmlinkage void kasan_unpoison_task_stac
 }
 #endif /* CONFIG_KASAN_STACK */
 
-void kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order)
 {
 	u8 tag;
 	unsigned long i;
@@ -100,7 +100,7 @@ void kasan_alloc_pages(struct page *page
 	unpoison_range(page_address(page), PAGE_SIZE << order);
 }
 
-void kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order)
 {
 	if (likely(!PageHighMem(page)))
 		poison_range(page_address(page),
@@ -127,8 +127,8 @@ static inline unsigned int optimal_redzo
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			slab_flags_t *flags)
+void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+			  slab_flags_t *flags)
 {
 	unsigned int orig_size = *size;
 	unsigned int redzone_size;
@@ -173,7 +173,7 @@ void kasan_cache_create(struct kmem_cach
 	*flags |= SLAB_KASAN;
 }
 
-size_t kasan_metadata_size(struct kmem_cache *cache)
+size_t __kasan_metadata_size(struct kmem_cache *cache)
 {
 	if (!kasan_stack_collection_enabled())
 		return 0;
@@ -196,7 +196,7 @@ struct kasan_free_meta *kasan_get_free_m
 	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
 
-void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct page *page)
 {
 	unsigned long i;
 
@@ -206,12 +206,12 @@ void kasan_poison_slab(struct page *page
 		     KASAN_KMALLOC_REDZONE);
 }
 
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 {
 	unpoison_range(object, cache->object_size);
 }
 
-void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
 	poison_range(object,
 			round_up(cache->object_size, KASAN_GRANULE_SIZE),
@@ -264,7 +264,7 @@ static u8 assign_tag(struct kmem_cache *
 #endif
 }
 
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 						const void *object)
 {
 	struct kasan_alloc_meta *alloc_meta;
@@ -283,7 +283,7 @@ void * __must_check kasan_init_slab_obj(
 	return (void *)object;
 }
 
-static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 			      unsigned long ip, bool quarantine)
 {
 	u8 tag;
@@ -326,9 +326,9 @@ static bool __kasan_slab_free(struct kme
 	return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
-bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 {
-	return __kasan_slab_free(cache, object, ip, true);
+	return ____kasan_slab_free(cache, object, ip, true);
 }
 
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -336,7 +336,7 @@ static void set_alloc_info(struct kmem_c
 	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
 }
 
-static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				size_t size, gfp_t flags, bool keep_tag)
 {
 	unsigned long redzone_start;
@@ -368,20 +368,20 @@ static void *__kasan_kmalloc(struct kmem
 	return set_tag(object, tag);
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-					gfp_t flags)
+void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
+					void *object, gfp_t flags)
 {
-	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+	return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
 }
 
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags)
+void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
+					size_t size, gfp_t flags)
 {
-	return __kasan_kmalloc(cache, object, size, flags, true);
+	return ____kasan_kmalloc(cache, object, size, flags, true);
 }
-EXPORT_SYMBOL(kasan_kmalloc);
+EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 						gfp_t flags)
 {
 	struct page *page;
@@ -406,7 +406,7 @@ void * __must_check kasan_kmalloc_large(
 	return (void *)ptr;
 }
 
-void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
+void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
@@ -416,13 +416,13 @@ void * __must_check kasan_krealloc(const
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		return kasan_kmalloc_large(object, size, flags);
+		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return __kasan_kmalloc(page->slab_cache, object, size,
+		return ____kasan_kmalloc(page->slab_cache, object, size,
 						flags, true);
 }
 
-void kasan_poison_kfree(void *ptr, unsigned long ip)
+void __kasan_poison_kfree(void *ptr, unsigned long ip)
 {
 	struct page *page;
 
@@ -435,11 +435,11 @@ void kasan_poison_kfree(void *ptr, unsig
 		}
 		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
-		__kasan_slab_free(page->slab_cache, ptr, ip, false);
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
 }
 
-void kasan_kfree_large(void *ptr, unsigned long ip)
+void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
_
