linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
@ 2016-06-17 14:27 Alexander Potapenko
  2016-06-18 15:32 ` Sasha Levin
  0 siblings, 1 reply; 7+ messages in thread
From: Alexander Potapenko @ 2016-06-17 14:27 UTC (permalink / raw)
  To: adech.fo, cl, dvyukov, akpm, rostedt, iamjoonsoo.kim, js1304,
	kcc, aryabinin, kuthonuzo.luruo
  Cc: kasan-dev, linux-mm, linux-kernel

For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
   effectively disabling these debug features, as they're redundant in
   the presence of KASAN;
 - change the freelist hook so that parts of the freelist can be put into
   the quarantine.

Signed-off-by: Alexander Potapenko <glider@google.com>
---
v4: - addressed comments by Andrey Ryabinin:
      - don't set slub_debug by default for everyone;
      - introduce the ___cache_free() helper function.
v3: - addressed comments by Andrey Ryabinin:
      - replaced KMALLOC_MAX_CACHE_SIZE with KMALLOC_MAX_SIZE in
        kasan_cache_create();
      - for caches with SLAB_KASAN flag set, their alloc_meta_offset and
        free_meta_offset are always valid.
v2: - incorporated kbuild fixes by Andrew Morton
---
 include/linux/slab.h     |  9 +++++++
 include/linux/slub_def.h |  4 ++++
 lib/Kconfig.kasan        |  4 ++--
 mm/kasan/Makefile        |  3 +--
 mm/kasan/kasan.c         | 61 +++++++++++++++++++++++++----------------------
 mm/kasan/kasan.h         |  2 +-
 mm/kasan/report.c        |  8 +++----
 mm/slab.h                |  7 ++++++
 mm/slub.c                | 62 +++++++++++++++++++++++++++++++++++++-----------
 9 files changed, 108 insertions(+), 52 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index aeb3e6d..fe91eef 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,11 +21,20 @@
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
+#ifndef CONFIG_KASAN
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
+#else
+#define SLAB_RED_ZONE		0x00000000UL	/* KASAN has its own redzones */
+#define SLAB_POISON		0x00000000UL	/* No extra poisoning */
+#endif
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#ifndef CONFIG_KASAN
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
+#else
+#define SLAB_STORE_USER		0x00000000UL	/* KASAN uses stack depot */
+#endif
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d1faa01..5585598 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,6 +99,10 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c68..bd38aab 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 1548749..2976a9e 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 18b6a2b..1d385e3 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,12 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
+
+#ifdef CONFIG_SLAB
+	int orig_size = *size;
+#endif
 	*flags |= SLAB_KASAN;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -392,17 +387,35 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	    cache->object_size < sizeof(struct kasan_free_meta)) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
+	} else {
+		cache->kasan_info.free_meta_offset = 0;
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
+
+#ifdef CONFIG_SLAB
+	*size = min(KMALLOC_MAX_SIZE,
 		    max(*size,
 			cache->object_size +
 			optimal_redzone(cache->object_size)));
-}
+	/*
+	 * If the metadata doesn't fit, disable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+			*size <= cache->kasan_info.free_meta_offset) {
+		*flags &= ~SLAB_KASAN;
+		*size = orig_size;
+	}
+#else
+	*size = max(*size,
+			cache->object_size +
+			optimal_redzone(cache->object_size));
+
 #endif
+}
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -431,16 +444,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +511,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +531,16 @@ void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +559,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -568,6 +573,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	if (unlikely(object == NULL))
 		return;
 
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+
 	redzone_start = round_up((unsigned long)(object + size),
 				KASAN_SHADOW_SCALE_SIZE);
 	redzone_end = round_up((unsigned long)object + cache->object_size,
@@ -576,16 +584,13 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
-
 		alloc_info->state = KASAN_STATE_ALLOC;
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb87923..8c75953 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -110,7 +110,7 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b3c122d..861b977 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -116,7 +116,6 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
-#ifdef CONFIG_SLAB
 static void print_track(struct kasan_track *track)
 {
 	pr_err("PID = %u\n", track->pid);
@@ -130,8 +129,8 @@ static void print_track(struct kasan_track *track)
 	}
 }
 
-static void object_err(struct kmem_cache *cache, struct page *page,
-			void *object, char *unused_reason)
+static void kasan_object_err(struct kmem_cache *cache, struct page *page,
+				void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	struct kasan_free_meta *free_info;
@@ -162,7 +161,6 @@ static void object_err(struct kmem_cache *cache, struct page *page,
 		break;
 	}
 }
-#endif
 
 static void print_address_description(struct kasan_access_info *info)
 {
@@ -177,7 +175,7 @@ static void print_address_description(struct kasan_access_info *info)
 			struct kmem_cache *cache = page->slab_cache;
 			object = nearest_obj(cache, page,
 						(void *)info->access_addr);
-			object_err(cache, page, object,
+			kasan_object_err(cache, page, object,
 					"kasan: bad access detected");
 			return;
 		}
diff --git a/mm/slab.h b/mm/slab.h
index dedb1a9..3381a74 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -366,6 +366,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
 # endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
@@ -463,5 +465,10 @@ void slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
+#if defined(CONFIG_SLUB)
+void do_slab_free(struct kmem_cache *s,
+		struct page *page, void *head, void *tail,
+		int cnt, unsigned long addr);
+#endif
 
 #endif /* MM_SLAB_H */
diff --git a/mm/slub.c b/mm/slub.c
index 825ff45..7b3d1fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -191,7 +191,11 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 
 /* Internal SLUB flags */
+#ifndef CONFIG_KASAN
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
+#else
+#define __OBJECT_POISON		0x00000000UL /* Disable object poisoning */
+#endif
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
 #ifdef CONFIG_SMP
@@ -454,8 +458,6 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
 static int slub_debug;
 #endif
@@ -1322,7 +1324,7 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
 
@@ -1344,11 +1346,11 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
-	kasan_slab_free(s, x);
+	return kasan_slab_free(s, x);
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
-					   void *head, void *tail)
+					   void **head, void **tail, int *cnt)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1360,13 +1362,27 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
 	defined(CONFIG_KASAN)
 
-	void *object = head;
-	void *tail_obj = tail ? : head;
+	void *object = *head, *prev = NULL, *next = NULL;
+	void *tail_obj = *tail ? : *head;
+	bool skip = false;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		skip = slab_free_hook(s, object);
+		next = (object != tail_obj) ?
+			get_freepointer(s, object) : NULL;
+		if (skip) {
+			if (!prev)
+				*head = next;
+			else
+				set_freepointer(s, prev, next);
+			if (object == tail_obj)
+				*tail = prev;
+			(*cnt)--;
+		} else {
+			prev = object;
+		}
+		object = next;
+	} while (next);
 #endif
 }
 
@@ -2772,12 +2788,22 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 				      void *head, void *tail, int cnt,
 				      unsigned long addr)
 {
+	void *free_head = head, *free_tail = tail;
+
+	slab_free_freelist_hook(s, &free_head, &free_tail, &cnt);
+	/* slab_free_freelist_hook() could have emptied the freelist. */
+	if (cnt == 0)
+		return;
+	do_slab_free(s, page, free_head, free_tail, cnt, addr);
+}
+
+__always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
+{
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2811,6 +2837,12 @@ redo:
 
 }
 
+/* Helper function to be used from qlink_free() in mm/kasan/quarantine.c */
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
+{
+	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+}
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
@@ -3252,7 +3284,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
@@ -3328,6 +3360,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	}
 #endif
 
+	kasan_cache_create(s, &size, &s->flags);
+
 	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
-- 
2.8.0.rc3.226.g39d4020

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-17 14:27 [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB Alexander Potapenko
@ 2016-06-18 15:32 ` Sasha Levin
  2016-06-19  7:24   ` Alexander Potapenko
  0 siblings, 1 reply; 7+ messages in thread
From: Sasha Levin @ 2016-06-18 15:32 UTC (permalink / raw)
  To: Alexander Potapenko, adech.fo, cl, dvyukov, akpm, rostedt,
	iamjoonsoo.kim, js1304, kcc, aryabinin, kuthonuzo.luruo
  Cc: kasan-dev, linux-mm, linux-kernel

On 06/17/2016 10:27 AM, Alexander Potapenko wrote:
> For KASAN builds:
>  - switch SLUB allocator to using stackdepot instead of storing the
>    allocation/deallocation stacks in the objects;
>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>    effectively disabling these debug features, as they're redundant in
>    the presence of KASAN;
>  - change the freelist hook so that parts of the freelist can be put into
>    the quarantine.
> 
> Signed-off-by: Alexander Potapenko <glider@google.com>

Hi Alexander,

I was seeing a bunch of use-after-frees detected by kasan, such as:

BUG: KASAN: use-after-free in rb_next+0x117/0x1b0 at addr ffff8800b01d4f30
Read of size 8 by task syz-executor/31594
CPU: 2 PID: 31594 Comm: syz-executor Tainted: G        W       4.7.0-rc2-sasha-00205-g2d8a14b #3117
 1ffff10015450f0f 000000007b9351fc ffff8800aa287900 ffffffffa002778b
 ffffffff00000002 fffffbfff5630d30 0000000041b58ab3 ffffffffaaad5648
 ffffffffa002761c ffffffff9e006ab6 ffffffffa8439f65 ffffffffffffffff
Call Trace:
 [<ffffffffa002778b>] dump_stack+0x16f/0x1d4
 [<ffffffff9e79e8cf>] kasan_report_error+0x59f/0x8c0
 [<ffffffff9e79ee06>] __asan_report_load8_noabort+0x66/0x90
 [<ffffffffa003ccf7>] rb_next+0x117/0x1b0
 [<ffffffff9e71627c>] validate_mm_rb+0xac/0xd0
 [<ffffffff9e718594>] __vma_link_rb+0x2e4/0x310
 [<ffffffff9e718650>] vma_link+0x90/0x1b0
 [<ffffffff9e722870>] mmap_region+0x13a0/0x13c0
 [<ffffffff9e7232b2>] do_mmap+0xa22/0xaf0
 [<ffffffff9e6c86bf>] vm_mmap_pgoff+0x14f/0x1c0
 [<ffffffff9e71ba8b>] SyS_mmap_pgoff+0x81b/0x910
 [<ffffffff9e1bf966>] SyS_mmap+0x16/0x20
 [<ffffffff9e006ab6>] do_syscall_64+0x2a6/0x490
 [<ffffffffa8439f65>] entry_SYSCALL64_slow_path+0x25/0x25
Object at ffff8800b01d4f00, in cache vm_area_struct
Object allocated with size 192 bytes.
Allocation:
PID = 8855
(stack is not available)
Memory state around the buggy address:
 ffff8800b01d4e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
 ffff8800b01d4e80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>ffff8800b01d4f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
                                     ^
 ffff8800b01d4f80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
 ffff8800b01d5000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00

Or:

BUG: KASAN: use-after-free in validate_mm_rb+0x73/0xd0 at addr ffff8800b01d4f38
Read of size 8 by task syz-executor/31594
CPU: 2 PID: 31594 Comm: syz-executor Tainted: G    B   W       4.7.0-rc2-sasha-00205-g2d8a14b #3117
 1ffff10015450f16 000000007b9351fc ffff8800aa287938 ffffffffa002778b
 ffffffff00000002 fffffbfff5630d30 0000000041b58ab3 ffffffffaaad5648
 ffffffffa002761c ffffffffa84399e8 0000000000000010 ffff8800b61e8000
Call Trace:
 [<ffffffffa002778b>] dump_stack+0x16f/0x1d4
 [<ffffffff9e79e8cf>] kasan_report_error+0x59f/0x8c0
 [<ffffffff9e79ee06>] __asan_report_load8_noabort+0x66/0x90
 [<ffffffff9e716243>] validate_mm_rb+0x73/0xd0
 [<ffffffff9e718594>] __vma_link_rb+0x2e4/0x310
 [<ffffffff9e718650>] vma_link+0x90/0x1b0
 [<ffffffff9e722870>] mmap_region+0x13a0/0x13c0
 [<ffffffff9e7232b2>] do_mmap+0xa22/0xaf0
 [<ffffffff9e6c86bf>] vm_mmap_pgoff+0x14f/0x1c0
 [<ffffffff9e71ba8b>] SyS_mmap_pgoff+0x81b/0x910
 [<ffffffff9e1bf966>] SyS_mmap+0x16/0x20
 [<ffffffff9e006ab6>] do_syscall_64+0x2a6/0x490
 [<ffffffffa8439f65>] entry_SYSCALL64_slow_path+0x25/0x25
Object at ffff8800b01d4f00, in cache vm_area_struct
Object allocated with size 192 bytes.
Allocation:
PID = 8855
(stack is not available)
Memory state around the buggy address:
 ffff8800b01d4e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
 ffff8800b01d4e80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>ffff8800b01d4f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
                                        ^
 ffff8800b01d4f80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
 ffff8800b01d5000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00

And bisection pointed me to this commit. Now, I'm not sure how to
tell if this is memory quarantine catching something, or if it's just a
bug with the patch?


Thanks,
Sasha

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-18 15:32 ` Sasha Levin
@ 2016-06-19  7:24   ` Alexander Potapenko
  2016-06-19 17:40     ` Sasha Levin
  0 siblings, 1 reply; 7+ messages in thread
From: Alexander Potapenko @ 2016-06-19  7:24 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Andrey Ryabinin, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On Sat, Jun 18, 2016 at 5:32 PM, Sasha Levin <sasha.levin@oracle.com> wrote:
> On 06/17/2016 10:27 AM, Alexander Potapenko wrote:
>> For KASAN builds:
>>  - switch SLUB allocator to using stackdepot instead of storing the
>>    allocation/deallocation stacks in the objects;
>>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>>    effectively disabling these debug features, as they're redundant in
>>    the presence of KASAN;
>>  - change the freelist hook so that parts of the freelist can be put into
>>    the quarantine.
>>
>> Signed-off-by: Alexander Potapenko <glider@google.com>
>
> Hi Alexander,
>
> I was seeing a bunch of use-after-frees detected by kasan, such as:
>
> BUG: KASAN: use-after-free in rb_next+0x117/0x1b0 at addr ffff8800b01d4f30
> Read of size 8 by task syz-executor/31594
> CPU: 2 PID: 31594 Comm: syz-executor Tainted: G        W       4.7.0-rc2-sasha-00205-g2d8a14b #3117
>  1ffff10015450f0f 000000007b9351fc ffff8800aa287900 ffffffffa002778b
>  ffffffff00000002 fffffbfff5630d30 0000000041b58ab3 ffffffffaaad5648
>  ffffffffa002761c ffffffff9e006ab6 ffffffffa8439f65 ffffffffffffffff
> Call Trace:
>  [<ffffffffa002778b>] dump_stack+0x16f/0x1d4
>  [<ffffffff9e79e8cf>] kasan_report_error+0x59f/0x8c0
>  [<ffffffff9e79ee06>] __asan_report_load8_noabort+0x66/0x90
>  [<ffffffffa003ccf7>] rb_next+0x117/0x1b0
>  [<ffffffff9e71627c>] validate_mm_rb+0xac/0xd0
>  [<ffffffff9e718594>] __vma_link_rb+0x2e4/0x310
>  [<ffffffff9e718650>] vma_link+0x90/0x1b0
>  [<ffffffff9e722870>] mmap_region+0x13a0/0x13c0
>  [<ffffffff9e7232b2>] do_mmap+0xa22/0xaf0
>  [<ffffffff9e6c86bf>] vm_mmap_pgoff+0x14f/0x1c0
>  [<ffffffff9e71ba8b>] SyS_mmap_pgoff+0x81b/0x910
>  [<ffffffff9e1bf966>] SyS_mmap+0x16/0x20
>  [<ffffffff9e006ab6>] do_syscall_64+0x2a6/0x490
>  [<ffffffffa8439f65>] entry_SYSCALL64_slow_path+0x25/0x25
> Object at ffff8800b01d4f00, in cache vm_area_struct
> Object allocated with size 192 bytes.
> Allocation:
> PID = 8855
> (stack is not available)
> Memory state around the buggy address:
>  ffff8800b01d4e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>  ffff8800b01d4e80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>>ffff8800b01d4f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>                                      ^
>  ffff8800b01d4f80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>  ffff8800b01d5000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>
> Or:
>
> BUG: KASAN: use-after-free in validate_mm_rb+0x73/0xd0 at addr ffff8800b01d4f38
> Read of size 8 by task syz-executor/31594
> CPU: 2 PID: 31594 Comm: syz-executor Tainted: G    B   W       4.7.0-rc2-sasha-00205-g2d8a14b #3117
>  1ffff10015450f16 000000007b9351fc ffff8800aa287938 ffffffffa002778b
>  ffffffff00000002 fffffbfff5630d30 0000000041b58ab3 ffffffffaaad5648
>  ffffffffa002761c ffffffffa84399e8 0000000000000010 ffff8800b61e8000
> Call Trace:
>  [<ffffffffa002778b>] dump_stack+0x16f/0x1d4
>  [<ffffffff9e79e8cf>] kasan_report_error+0x59f/0x8c0
>  [<ffffffff9e79ee06>] __asan_report_load8_noabort+0x66/0x90
>  [<ffffffff9e716243>] validate_mm_rb+0x73/0xd0
>  [<ffffffff9e718594>] __vma_link_rb+0x2e4/0x310
>  [<ffffffff9e718650>] vma_link+0x90/0x1b0
>  [<ffffffff9e722870>] mmap_region+0x13a0/0x13c0
>  [<ffffffff9e7232b2>] do_mmap+0xa22/0xaf0
>  [<ffffffff9e6c86bf>] vm_mmap_pgoff+0x14f/0x1c0
>  [<ffffffff9e71ba8b>] SyS_mmap_pgoff+0x81b/0x910
>  [<ffffffff9e1bf966>] SyS_mmap+0x16/0x20
>  [<ffffffff9e006ab6>] do_syscall_64+0x2a6/0x490
>  [<ffffffffa8439f65>] entry_SYSCALL64_slow_path+0x25/0x25
> Object at ffff8800b01d4f00, in cache vm_area_struct
> Object allocated with size 192 bytes.
> Allocation:
> PID = 8855
> (stack is not available)
> Memory state around the buggy address:
>  ffff8800b01d4e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>  ffff8800b01d4e80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>>ffff8800b01d4f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>                                         ^
>  ffff8800b01d4f80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>  ffff8800b01d5000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>
> And bisection pointed me to this commit. Now, I'm not sure how to
> tell if this is memory quarantine catching something, or if it's just a
> bug with the patch?
>
>
> Thanks,
> Sasha

Hi Sasha,

This commit delays the reuse of memory after it has been freed, so
it's intended to help people find more use-after-free errors.
But I'm puzzled why the stacks are missing.
Can you please share the reproduction steps for this bug?
I also wonder whether it's reproducible when you:
 - revert this commit?
 - build with SLAB instead of SLUB?

HTH,
Alex

-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-19  7:24   ` Alexander Potapenko
@ 2016-06-19 17:40     ` Sasha Levin
  2016-06-20 12:53       ` Alexander Potapenko
  2016-06-20 13:21       ` Alexander Potapenko
  0 siblings, 2 replies; 7+ messages in thread
From: Sasha Levin @ 2016-06-19 17:40 UTC (permalink / raw)
  To: Alexander Potapenko
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Andrey Ryabinin, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On 06/19/2016 03:24 AM, Alexander Potapenko wrote:
> Hi Sasha,
> 
> This commit delays the reuse of memory after it has been freed, so
> it's intended to help people find more use-after-free errors.

Is there a way to tell if the use-after-free access was to memory
that is quarantined?

> But I'm puzzled why the stacks are missing.

I looked at the logs, and it looks like stackdepot ran out of room pretty
early during boot. I've increased the max count and that solved the
problem. Here's a trace with all the stacks:

[ 1157.040216] BUG: KASAN: use-after-free in print_bad_pte+0x5c7/0x6e0 at addr ffff8801b82286a0
[ 1157.040222] Read of size 8 by task syz-executor/20583
[ 1157.040236] CPU: 0 PID: 20583 Comm: syz-executor Tainted: G    B           4.7.0-rc2-next-20160609-sasha-00032-g779e0df-dirty #3123
[ 1157.040249]  1ffff10016b26e97 000000001af4d42c ffff8800b5937540 ffffffffa103380b
[ 1157.040262]  ffffffff00000000 fffffbfff5830bf4 0000000041b58ab3 ffffffffabaf1240
[ 1157.040274]  ffffffffa103369c 0000000000000006 0000000000000000 ffff8800b5937550
[ 1157.040276] Call Trace:
[ 1157.040290]  [<ffffffffa103380b>] dump_stack+0x16f/0x1d4
[ 1157.040319]  [<ffffffff9f7a148f>] kasan_report_error+0x59f/0x8c0
[ 1157.040382]  [<ffffffff9f7a19c6>] __asan_report_load8_noabort+0x66/0x90
[ 1157.040409]  [<ffffffff9f6fa5e7>] print_bad_pte+0x5c7/0x6e0
[ 1157.040418]  [<ffffffff9f702e02>] unmap_page_range+0x12f2/0x1e20
[ 1157.040445]  [<ffffffff9f703b69>] unmap_single_vma+0x239/0x250
[ 1157.040452]  [<ffffffff9f7045e9>] unmap_vmas+0x119/0x1d0
[ 1157.040461]  [<ffffffff9f720a73>] exit_mmap+0x2a3/0x410
[ 1157.040485]  [<ffffffff9f3769e2>] mmput+0x192/0x350
[ 1157.040524]  [<ffffffff9f38d745>] do_exit+0xea5/0x19e0
[ 1157.040566]  [<ffffffff9f38e5d3>] do_group_exit+0x2e3/0x2f0
[ 1157.040580]  [<ffffffff9f3b1928>] get_signal+0x1128/0x1370
[ 1157.040593]  [<ffffffff9f1afca6>] do_signal+0x86/0x1da0
[ 1157.040700]  [<ffffffff9f00539c>] exit_to_usermode_loop+0xac/0x200
[ 1157.040712]  [<ffffffff9f006c20>] do_syscall_64+0x410/0x490
[ 1157.040725]  [<ffffffffa94d0ca5>] entry_SYSCALL64_slow_path+0x25/0x25
[ 1157.040733] Object at ffff8801b8228600, in cache vm_area_struct
[ 1157.040737] Object allocated with size 192 bytes.
[ 1157.040738] Allocation:
[ 1157.040741] PID = 20521
[ 1157.040757]  [<ffffffff9f1dfae6>] save_stack_trace+0x26/0x70
[ 1157.040770]  [<ffffffff9f7a01e6>] save_stack+0x46/0xd0
[ 1157.040784]  [<ffffffff9f7a0470>] kasan_kmalloc+0x110/0x130
[ 1157.040797]  [<ffffffff9f7a09a2>] kasan_slab_alloc+0x12/0x20
[ 1157.040811]  [<ffffffff9f79a546>] kmem_cache_alloc+0x1e6/0x230
[ 1157.040826]  [<ffffffff9f7245ad>] mmap_region+0x56d/0x13c0
[ 1157.040840]  [<ffffffff9f725e22>] do_mmap+0xa22/0xaf0
[ 1157.040853]  [<ffffffff9f6cb1af>] vm_mmap_pgoff+0x14f/0x1c0
[ 1157.040889]  [<ffffffff9f71e5fb>] SyS_mmap_pgoff+0x81b/0x910
[ 1157.040901]  [<ffffffff9f1bf966>] SyS_mmap+0x16/0x20
[ 1157.040910]  [<ffffffff9f006ab6>] do_syscall_64+0x2a6/0x490
[ 1157.040919]  [<ffffffffa94d0ca5>] return_from_SYSCALL_64+0x0/0x6a
[ 1157.040920] Memory state around the buggy address:
[ 1157.040927]  ffff8801b8228580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[ 1157.040933]  ffff8801b8228600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 1157.040938] >ffff8801b8228680: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[ 1157.040940]                                ^
[ 1157.040946]  ffff8801b8228700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 1157.040951]  ffff8801b8228780: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc

> Can you please share the reproduction steps for this bug?

Just running syzkaller inside a kvmtool guest.

> I also wonder whether it's reproducible when you:
>  - revert this commit?

Not reproducible.

>  - build with SLAB instead of SLUB?

Not reproducible.


Thanks,
Sasha

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-19 17:40     ` Sasha Levin
@ 2016-06-20 12:53       ` Alexander Potapenko
       [not found]         ` <5768490E.6050808@oracle.com>
  2016-06-20 13:21       ` Alexander Potapenko
  1 sibling, 1 reply; 7+ messages in thread
From: Alexander Potapenko @ 2016-06-20 12:53 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Andrey Ryabinin, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

[-- Attachment #1: Type: text/plain, Size: 4912 bytes --]

On Sun, Jun 19, 2016 at 7:40 PM, Sasha Levin <sasha.levin@oracle.com> wrote:
> On 06/19/2016 03:24 AM, Alexander Potapenko wrote:
>> Hi Sasha,
>>
>> This commit delays the reuse of memory after it has been freed, so
>> it's intended to help people find more use-after-free errors.
>
> Is there a way to tell if the use-after-free access was to a memory
> that is quarantined?
>
>> But I'm puzzled why the stacks are missing.
>
> I looked at the logs, it looks like stackdepot ran out of room pretty
> early during boot.
This is quite strange, as there's room for ~80k stacks in the depot,
and usually the number of unique stacks is lower than 30k.
I wonder if this is specific to your config — can you please share it
(assuming you're using a ToT kernel)?
Attached is a patch that you can try out to dump the new stacks
after 30k — it's really interesting where they come from (note that the
patch is not for submission).

> I've increased the max count and that solved the
> problem. Here's a trace with all the stacks:
>
> [ 1157.040216] BUG: KASAN: use-after-free in print_bad_pte+0x5c7/0x6e0 at addr ffff8801b82286a0
> [ 1157.040222] Read of size 8 by task syz-executor/20583
> [ 1157.040236] CPU: 0 PID: 20583 Comm: syz-executor Tainted: G    B           4.7.0-rc2-next-20160609-sasha-00032-g779e0df-dirty #3123
> [ 1157.040249]  1ffff10016b26e97 000000001af4d42c ffff8800b5937540 ffffffffa103380b
> [ 1157.040262]  ffffffff00000000 fffffbfff5830bf4 0000000041b58ab3 ffffffffabaf1240
> [ 1157.040274]  ffffffffa103369c 0000000000000006 0000000000000000 ffff8800b5937550
> [ 1157.040276] Call Trace:
> [ 1157.040290]  [<ffffffffa103380b>] dump_stack+0x16f/0x1d4
> [ 1157.040319]  [<ffffffff9f7a148f>] kasan_report_error+0x59f/0x8c0
> [ 1157.040382]  [<ffffffff9f7a19c6>] __asan_report_load8_noabort+0x66/0x90
> [ 1157.040409]  [<ffffffff9f6fa5e7>] print_bad_pte+0x5c7/0x6e0
> [ 1157.040418]  [<ffffffff9f702e02>] unmap_page_range+0x12f2/0x1e20
> [ 1157.040445]  [<ffffffff9f703b69>] unmap_single_vma+0x239/0x250
> [ 1157.040452]  [<ffffffff9f7045e9>] unmap_vmas+0x119/0x1d0
> [ 1157.040461]  [<ffffffff9f720a73>] exit_mmap+0x2a3/0x410
> [ 1157.040485]  [<ffffffff9f3769e2>] mmput+0x192/0x350
> [ 1157.040524]  [<ffffffff9f38d745>] do_exit+0xea5/0x19e0
> [ 1157.040566]  [<ffffffff9f38e5d3>] do_group_exit+0x2e3/0x2f0
> [ 1157.040580]  [<ffffffff9f3b1928>] get_signal+0x1128/0x1370
> [ 1157.040593]  [<ffffffff9f1afca6>] do_signal+0x86/0x1da0
> [ 1157.040700]  [<ffffffff9f00539c>] exit_to_usermode_loop+0xac/0x200
> [ 1157.040712]  [<ffffffff9f006c20>] do_syscall_64+0x410/0x490
> [ 1157.040725]  [<ffffffffa94d0ca5>] entry_SYSCALL64_slow_path+0x25/0x25
> [ 1157.040733] Object at ffff8801b8228600, in cache vm_area_struct
> [ 1157.040737] Object allocated with size 192 bytes.
> [ 1157.040738] Allocation:
> [ 1157.040741] PID = 20521
> [ 1157.040757]  [<ffffffff9f1dfae6>] save_stack_trace+0x26/0x70
> [ 1157.040770]  [<ffffffff9f7a01e6>] save_stack+0x46/0xd0
> [ 1157.040784]  [<ffffffff9f7a0470>] kasan_kmalloc+0x110/0x130
> [ 1157.040797]  [<ffffffff9f7a09a2>] kasan_slab_alloc+0x12/0x20
> [ 1157.040811]  [<ffffffff9f79a546>] kmem_cache_alloc+0x1e6/0x230
> [ 1157.040826]  [<ffffffff9f7245ad>] mmap_region+0x56d/0x13c0
> [ 1157.040840]  [<ffffffff9f725e22>] do_mmap+0xa22/0xaf0
> [ 1157.040853]  [<ffffffff9f6cb1af>] vm_mmap_pgoff+0x14f/0x1c0
> [ 1157.040889]  [<ffffffff9f71e5fb>] SyS_mmap_pgoff+0x81b/0x910
> [ 1157.040901]  [<ffffffff9f1bf966>] SyS_mmap+0x16/0x20
> [ 1157.040910]  [<ffffffff9f006ab6>] do_syscall_64+0x2a6/0x490
> [ 1157.040919]  [<ffffffffa94d0ca5>] return_from_SYSCALL_64+0x0/0x6a
> [ 1157.040920] Memory state around the buggy address:
> [ 1157.040927]  ffff8801b8228580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
> [ 1157.040933]  ffff8801b8228600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
> [ 1157.040938] >ffff8801b8228680: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
> [ 1157.040940]                                ^
> [ 1157.040946]  ffff8801b8228700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
> [ 1157.040951]  ffff8801b8228780: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
Again, it's interesting why the deallocation stack is missing from a
use-after-free report.
>> Can you please share the reproduction steps for this bug?
>
> Just running syzkaller inside a kvmtool guest.
Any syzkaller programs in the logs?
>> I also wonder whether it's reproducible when you:
>>  - revert this commit?
>
> Not reproducible.
>
>>  - build with SLAB instead of SLUB?
>
> Not reproducible.
>
>
> Thanks,
> Sasha



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

[-- Attachment #2: alloc_cnt.patch --]
[-- Type: text/x-patch, Size: 807 bytes --]

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 53ad6c0..f1d84e1 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -104,6 +104,7 @@ static bool init_stack_slab(void **prealloc)
 	return true;
 }
 
+static int alloc_cnt = 0;  /* imprecise */
 /* Allocation of a new stack in raw storage */
 static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
 		u32 hash, void **prealloc, gfp_t alloc_flags)
@@ -143,6 +144,11 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
 	memcpy(stack->entries, entries, size * sizeof(unsigned long));
 	depot_offset += required_size;
 
+	alloc_cnt++;
+	if (alloc_cnt % 100 == 0)
+		pr_err("alloc_cnt: %d\n", alloc_cnt);
+	if ((alloc_cnt > 20000) && (alloc_cnt % 10 == 0))
+		dump_stack();
 	return stack;
 }
 

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-19 17:40     ` Sasha Levin
  2016-06-20 12:53       ` Alexander Potapenko
@ 2016-06-20 13:21       ` Alexander Potapenko
  1 sibling, 0 replies; 7+ messages in thread
From: Alexander Potapenko @ 2016-06-20 13:21 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Andrey Ryabinin, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On Sun, Jun 19, 2016 at 7:40 PM, Sasha Levin <sasha.levin@oracle.com> wrote:
> On 06/19/2016 03:24 AM, Alexander Potapenko wrote:
>> Hi Sasha,
>>
>> This commit delays the reuse of memory after it has been freed, so
>> it's intended to help people find more use-after-free errors.
>
> Is there a way to tell if the use-after-free access was to a memory
> that is quarantined?
Sorry, missed this question.
In theory, these can be distinguished by looking at alloc_info->state
(see kasan_object_err() in mm/kasan/report.c): the quarantined chunk
should have the KASAN_STATE_QUARANTINE state.
But your report contains the line "Object allocated with size 192
bytes.", which means the chunk had the KASAN_STATE_ALLOC state.
Therefore, we're dealing with an inconsistency between the shadow
memory value (0xFB, i.e. freed memory) and the chunk header
(KASAN_STATE_ALLOC) here.
Can you apply "mm, kasan: improve double-free detection" by Kuthonuzo
Luruo and see if the report changes?
>> But I'm puzzled why the stacks are missing.
>
> I looked at the logs, it looks like stackdepot ran out of room pretty
> early during boot. I've increased the max count and that solved the
> problem. Here's a trace with all the stacks:
>
> [ 1157.040216] BUG: KASAN: use-after-free in print_bad_pte+0x5c7/0x6e0 at addr ffff8801b82286a0
> [ 1157.040222] Read of size 8 by task syz-executor/20583
> [ 1157.040236] CPU: 0 PID: 20583 Comm: syz-executor Tainted: G    B           4.7.0-rc2-next-20160609-sasha-00032-g779e0df-dirty #3123
> [ 1157.040249]  1ffff10016b26e97 000000001af4d42c ffff8800b5937540 ffffffffa103380b
> [ 1157.040262]  ffffffff00000000 fffffbfff5830bf4 0000000041b58ab3 ffffffffabaf1240
> [ 1157.040274]  ffffffffa103369c 0000000000000006 0000000000000000 ffff8800b5937550
> [ 1157.040276] Call Trace:
> [ 1157.040290]  [<ffffffffa103380b>] dump_stack+0x16f/0x1d4
> [ 1157.040319]  [<ffffffff9f7a148f>] kasan_report_error+0x59f/0x8c0
> [ 1157.040382]  [<ffffffff9f7a19c6>] __asan_report_load8_noabort+0x66/0x90
> [ 1157.040409]  [<ffffffff9f6fa5e7>] print_bad_pte+0x5c7/0x6e0
> [ 1157.040418]  [<ffffffff9f702e02>] unmap_page_range+0x12f2/0x1e20
> [ 1157.040445]  [<ffffffff9f703b69>] unmap_single_vma+0x239/0x250
> [ 1157.040452]  [<ffffffff9f7045e9>] unmap_vmas+0x119/0x1d0
> [ 1157.040461]  [<ffffffff9f720a73>] exit_mmap+0x2a3/0x410
> [ 1157.040485]  [<ffffffff9f3769e2>] mmput+0x192/0x350
> [ 1157.040524]  [<ffffffff9f38d745>] do_exit+0xea5/0x19e0
> [ 1157.040566]  [<ffffffff9f38e5d3>] do_group_exit+0x2e3/0x2f0
> [ 1157.040580]  [<ffffffff9f3b1928>] get_signal+0x1128/0x1370
> [ 1157.040593]  [<ffffffff9f1afca6>] do_signal+0x86/0x1da0
> [ 1157.040700]  [<ffffffff9f00539c>] exit_to_usermode_loop+0xac/0x200
> [ 1157.040712]  [<ffffffff9f006c20>] do_syscall_64+0x410/0x490
> [ 1157.040725]  [<ffffffffa94d0ca5>] entry_SYSCALL64_slow_path+0x25/0x25
> [ 1157.040733] Object at ffff8801b8228600, in cache vm_area_struct
> [ 1157.040737] Object allocated with size 192 bytes.
> [ 1157.040738] Allocation:
> [ 1157.040741] PID = 20521
> [ 1157.040757]  [<ffffffff9f1dfae6>] save_stack_trace+0x26/0x70
> [ 1157.040770]  [<ffffffff9f7a01e6>] save_stack+0x46/0xd0
> [ 1157.040784]  [<ffffffff9f7a0470>] kasan_kmalloc+0x110/0x130
> [ 1157.040797]  [<ffffffff9f7a09a2>] kasan_slab_alloc+0x12/0x20
> [ 1157.040811]  [<ffffffff9f79a546>] kmem_cache_alloc+0x1e6/0x230
> [ 1157.040826]  [<ffffffff9f7245ad>] mmap_region+0x56d/0x13c0
> [ 1157.040840]  [<ffffffff9f725e22>] do_mmap+0xa22/0xaf0
> [ 1157.040853]  [<ffffffff9f6cb1af>] vm_mmap_pgoff+0x14f/0x1c0
> [ 1157.040889]  [<ffffffff9f71e5fb>] SyS_mmap_pgoff+0x81b/0x910
> [ 1157.040901]  [<ffffffff9f1bf966>] SyS_mmap+0x16/0x20
> [ 1157.040910]  [<ffffffff9f006ab6>] do_syscall_64+0x2a6/0x490
> [ 1157.040919]  [<ffffffffa94d0ca5>] return_from_SYSCALL_64+0x0/0x6a
> [ 1157.040920] Memory state around the buggy address:
> [ 1157.040927]  ffff8801b8228580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
> [ 1157.040933]  ffff8801b8228600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
> [ 1157.040938] >ffff8801b8228680: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
> [ 1157.040940]                                ^
> [ 1157.040946]  ffff8801b8228700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
> [ 1157.040951]  ffff8801b8228780: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>
>> Can you please share the reproduction steps for this bug?
>
> Just running syzkaller inside a kvmtool guest.
>
>> I also wonder whether it's reproducible when you:
>>  - revert this commit?
>
> Not reproducible.
>
>>  - build with SLAB instead of SLUB?
>
> Not reproducible.
>
>
> Thanks,
> Sasha



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
       [not found]         ` <5768490E.6050808@oracle.com>
@ 2016-06-21  8:18           ` Alexander Potapenko
  0 siblings, 0 replies; 7+ messages in thread
From: Alexander Potapenko @ 2016-06-21  8:18 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Andrey Ryabinin, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On Mon, Jun 20, 2016 at 9:50 PM, Sasha Levin <sasha.levin@oracle.com> wrote:
> On 06/20/2016 08:53 AM, Alexander Potapenko wrote:
>> On Sun, Jun 19, 2016 at 7:40 PM, Sasha Levin <sasha.levin@oracle.com> wrote:
>>> > On 06/19/2016 03:24 AM, Alexander Potapenko wrote:
>>>> >> Hi Sasha,
>>>> >>
>>>> >> This commit delays the reuse of memory after it has been freed, so
>>>> >> it's intended to help people find more use-after-free errors.
>>> >
>>> > Is there a way to tell if the use-after-free access was to a memory
>>> > that is quarantined?
>>> >
>>>> >> But I'm puzzled why the stacks are missing.
>>> >
>>> > I looked at the logs, it looks like stackdepot ran out of room pretty
>>> > early during boot.
>> This is quite strange, as there's room for ~80k stacks in the depot,
>> and usually the number of unique stacks is lower than 30k.
>> I wonder if this is specific to your config, can you please share it
>> (assuming you're using ToT kernel)?
>> Attached is the patch that you can try out to dump the new stacks
>> after 30k - it's really interesting where do they come from (note the
>> patch is not for submission).
>
> Attached a log file generated with that patch, and my kernel config.
I haven't looked closely yet, but your log contains 1455 unique
'DRIVERNAME_driver_init' function names, which makes it quite likely
that 80k allocation stacks are generated.
Can you remove the dump_stack() call from my patch, fuzz for a while,
and see what number |alloc_cnt| converges to on your machine?
Maybe our estimate was just too optimistic, and we need to increase
the memory limit for the stack depot.
>
> Thanks,
> Sasha
>



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2016-06-21  8:23 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-17 14:27 [PATCH v4] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB Alexander Potapenko
2016-06-18 15:32 ` Sasha Levin
2016-06-19  7:24   ` Alexander Potapenko
2016-06-19 17:40     ` Sasha Levin
2016-06-20 12:53       ` Alexander Potapenko
     [not found]         ` <5768490E.6050808@oracle.com>
2016-06-21  8:18           ` Alexander Potapenko
2016-06-20 13:21       ` Alexander Potapenko

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).