* [PATCH v3] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
From: Alexander Potapenko @ 2016-06-15 15:26 UTC
  To: adech.fo, cl, dvyukov, akpm, rostedt, iamjoonsoo.kim, js1304,
	kcc, aryabinin, kuthonuzo.luruo
  Cc: kasan-dev, linux-mm, linux-kernel

For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
   effectively disabling these debug features, as they're redundant in
   the presence of KASAN;
 - change the freelist hook so that parts of the freelist can be put into
   the quarantine.

Signed-off-by: Alexander Potapenko <glider@google.com>
---
v3: - addressed comments by Andrey Ryabinin:
      - replaced KMALLOC_MAX_CACHE_SIZE with KMALLOC_MAX_SIZE in
        kasan_cache_create();
      - for caches with SLAB_KASAN flag set, their alloc_meta_offset and
        free_meta_offset are always valid.
v2: - incorporated kbuild fixes by Andrew Morton
---
 include/linux/slab.h     |  9 +++++++
 include/linux/slub_def.h |  4 ++++
 lib/Kconfig.kasan        |  4 ++--
 mm/kasan/Makefile        |  3 +--
 mm/kasan/kasan.c         | 61 ++++++++++++++++++++++++++----------------------
 mm/kasan/kasan.h         |  2 +-
 mm/kasan/quarantine.c    |  5 ++++
 mm/kasan/report.c        |  8 +++----
 mm/slab.h                |  8 +++++++
 mm/slub.c                | 58 +++++++++++++++++++++++++++++++++------------
 10 files changed, 109 insertions(+), 53 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index aeb3e6d..fe91eef 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,11 +21,20 @@
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
+#ifndef CONFIG_KASAN
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
+#else
+#define SLAB_RED_ZONE		0x00000000UL	/* KASAN has its own redzones */
+#define SLAB_POISON		0x00000000UL	/* No extra poisoning */
+#endif
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#ifndef CONFIG_KASAN
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
+#else
+#define SLAB_STORE_USER		0x00000000UL	/* KASAN uses stack depot */
+#endif
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d1faa01..5585598 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,6 +99,10 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c68..bd38aab 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 1548749..2976a9e 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 18b6a2b..1d385e3 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,12 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
+
+#ifdef CONFIG_SLAB
+	int orig_size = *size;
+#endif
 	*flags |= SLAB_KASAN;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -392,17 +387,35 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	    cache->object_size < sizeof(struct kasan_free_meta)) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
+	} else {
+		cache->kasan_info.free_meta_offset = 0;
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
+
+#ifdef CONFIG_SLAB
+	*size = min(KMALLOC_MAX_SIZE,
 		    max(*size,
 			cache->object_size +
 			optimal_redzone(cache->object_size)));
-}
+	/*
+	 * If the metadata doesn't fit, disable KASAN entirely.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+			*size <= cache->kasan_info.free_meta_offset) {
+		*flags &= ~SLAB_KASAN;
+		*size = orig_size;
+	}
+#else
+	*size = max(*size,
+			cache->object_size +
+			optimal_redzone(cache->object_size));
+
 #endif
+}
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -431,16 +444,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +511,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +531,16 @@ void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +559,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -568,6 +573,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	if (unlikely(object == NULL))
 		return;
 
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+
 	redzone_start = round_up((unsigned long)(object + size),
 				KASAN_SHADOW_SCALE_SIZE);
 	redzone_end = round_up((unsigned long)object + cache->object_size,
@@ -576,16 +584,13 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
-
 		alloc_info->state = KASAN_STATE_ALLOC;
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb87923..8c75953 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -110,7 +110,7 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 4973505..89259c2 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -149,7 +149,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 
 	local_irq_save(flags);
 	alloc_info->state = KASAN_STATE_FREE;
+#ifdef CONFIG_SLAB
 	___cache_free(cache, object, _THIS_IP_);
+#elif defined(CONFIG_SLUB)
+	do_slab_free(cache, virt_to_head_page(object), object, NULL, 1,
+		_RET_IP_);
+#endif
 	local_irq_restore(flags);
 }
 
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b3c122d..861b977 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -116,7 +116,6 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
-#ifdef CONFIG_SLAB
 static void print_track(struct kasan_track *track)
 {
 	pr_err("PID = %u\n", track->pid);
@@ -130,8 +129,8 @@ static void print_track(struct kasan_track *track)
 	}
 }
 
-static void object_err(struct kmem_cache *cache, struct page *page,
-			void *object, char *unused_reason)
+static void kasan_object_err(struct kmem_cache *cache, struct page *page,
+				void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	struct kasan_free_meta *free_info;
@@ -162,7 +161,6 @@ static void object_err(struct kmem_cache *cache, struct page *page,
 		break;
 	}
 }
-#endif
 
 static void print_address_description(struct kasan_access_info *info)
 {
@@ -177,7 +175,7 @@ static void print_address_description(struct kasan_access_info *info)
 			struct kmem_cache *cache = page->slab_cache;
 			object = nearest_obj(cache, page,
 						(void *)info->access_addr);
-			object_err(cache, page, object,
+			kasan_object_err(cache, page, object,
 					"kasan: bad access detected");
 			return;
 		}
diff --git a/mm/slab.h b/mm/slab.h
index dedb1a9..23cac96 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -366,6 +366,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
 # endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
@@ -462,6 +464,12 @@ void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
+#if defined(CONFIG_SLAB)
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
+#elif defined(CONFIG_SLUB)
+void do_slab_free(struct kmem_cache *s,
+		struct page *page, void *head, void *tail,
+		int cnt, unsigned long addr);
+#endif
 
 #endif /* MM_SLAB_H */
diff --git a/mm/slub.c b/mm/slub.c
index 825ff45..f023dd4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -191,7 +191,11 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 
 /* Internal SLUB flags */
+#ifndef CONFIG_KASAN
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
+#else
+#define __OBJECT_POISON		0x00000000UL /* Disable object poisoning */
+#endif
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
 #ifdef CONFIG_SMP
@@ -454,10 +458,8 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
-static int slub_debug;
+static int slub_debug = SLAB_STORE_USER;
 #endif
 
 static char *slub_debug_slabs;
@@ -1322,7 +1324,7 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
 
@@ -1344,11 +1346,11 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
-	kasan_slab_free(s, x);
+	return kasan_slab_free(s, x);
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
-					   void *head, void *tail)
+					   void **head, void **tail, int *cnt)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1360,13 +1362,27 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
 	defined(CONFIG_KASAN)
 
-	void *object = head;
-	void *tail_obj = tail ? : head;
+	void *object = *head, *prev = NULL, *next = NULL;
+	void *tail_obj = *tail ? : *head;
+	bool skip = false;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		skip = slab_free_hook(s, object);
+		next = (object != tail_obj) ?
+			get_freepointer(s, object) : NULL;
+		if (skip) {
+			if (!prev)
+				*head = next;
+			else
+				set_freepointer(s, prev, next);
+			if (object == tail_obj)
+				*tail = prev;
+			(*cnt)--;
+		} else {
+			prev = object;
+		}
+		object = next;
+	} while (next);
 #endif
 }
 
@@ -2772,12 +2788,22 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 				      void *head, void *tail, int cnt,
 				      unsigned long addr)
 {
+	void *free_head = head, *free_tail = tail;
+
+	slab_free_freelist_hook(s, &free_head, &free_tail, &cnt);
+	/* slab_free_freelist_hook() could have emptied the freelist. */
+	if (cnt == 0)
+		return;
+	do_slab_free(s, page, free_head, free_tail, cnt, addr);
+}
+
+__always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
+{
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3252,7 +3278,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
@@ -3328,6 +3354,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	}
 #endif
 
+	kasan_cache_create(s, &size, &s->flags);
+
 	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
-- 
2.8.0.rc3.226.g39d4020


* Re: [PATCH v3] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
From: Andrey Ryabinin @ 2016-06-15 16:50 UTC
  To: Alexander Potapenko, adech.fo, cl, dvyukov, akpm, rostedt,
	iamjoonsoo.kim, js1304, kcc, kuthonuzo.luruo
  Cc: kasan-dev, linux-mm, linux-kernel



On 06/15/2016 06:26 PM, Alexander Potapenko wrote:
> For KASAN builds:
>  - switch SLUB allocator to using stackdepot instead of storing the
>    allocation/deallocation stacks in the objects;
>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>    effectively disabling these debug features, as they're redundant in
>    the presence of KASAN;

So, why do we forbid these? If a user wants to set these, why not? If you don't want them, just don't turn them on, that's it.

And sometimes POISON/REDZONE might actually be useful. KASAN doesn't catch everything;
e.g. corruption may happen in assembly code, or via DMA by some device.


>  - change the freelist hook so that parts of the freelist can be put into
>    the quarantine.
> 
> Signed-off-by: Alexander Potapenko <glider@google.com>
> ---

...

> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index fb87923..8c75953 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -110,7 +110,7 @@ static inline bool kasan_report_enabled(void)
>  void kasan_report(unsigned long addr, size_t size,
>  		bool is_write, unsigned long ip);
>  
> -#ifdef CONFIG_SLAB
> +#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
>  void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
>  void quarantine_reduce(void);
>  void quarantine_remove_cache(struct kmem_cache *cache);
> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
> index 4973505..89259c2 100644
> --- a/mm/kasan/quarantine.c
> +++ b/mm/kasan/quarantine.c
> @@ -149,7 +149,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
>  
>  	local_irq_save(flags);
>  	alloc_info->state = KASAN_STATE_FREE;
> +#ifdef CONFIG_SLAB
>  	___cache_free(cache, object, _THIS_IP_);
> +#elif defined(CONFIG_SLUB)
> +	do_slab_free(cache, virt_to_head_page(object), object, NULL, 1,
> +		_RET_IP_);
> +#endif

Please, add some simple wrapper instead of this.

>  	local_irq_restore(flags);
>  }
>  


...

> diff --git a/mm/slub.c b/mm/slub.c
> index 825ff45..f023dd4 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -191,7 +191,11 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
>  #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
>  
>  /* Internal SLUB flags */
> +#ifndef CONFIG_KASAN
>  #define __OBJECT_POISON		0x80000000UL /* Poison object */
> +#else
> +#define __OBJECT_POISON		0x00000000UL /* Disable object poisoning */
> +#endif
>  #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
>  
>  #ifdef CONFIG_SMP
> @@ -454,10 +458,8 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
>   */
>  #if defined(CONFIG_SLUB_DEBUG_ON)
>  static int slub_debug = DEBUG_DEFAULT_FLAGS;
> -#elif defined(CONFIG_KASAN)
> -static int slub_debug = SLAB_STORE_USER;
>  #else
> -static int slub_debug;
> +static int slub_debug = SLAB_STORE_USER;

Huh! So now it is on!? By default, and for everyone!


* Re: [PATCH v3] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
From: Alexander Potapenko @ 2016-06-17 14:27 UTC
  To: Andrey Ryabinin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On Wed, Jun 15, 2016 at 6:50 PM, Andrey Ryabinin
<aryabinin@virtuozzo.com> wrote:
>
>
> On 06/15/2016 06:26 PM, Alexander Potapenko wrote:
>> For KASAN builds:
>>  - switch SLUB allocator to using stackdepot instead of storing the
>>    allocation/deallocation stacks in the objects;
>>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>>    effectively disabling these debug features, as they're redundant in
>>    the presence of KASAN;
>
> So, why do we forbid these? If a user wants to set these, why not? If you don't want them, just don't turn them on, that's it.
SLAB_RED_ZONE simply doesn't work with KASAN.
With additional effort it may work, but I don't think we really need
that. Extra red zones will just bloat the heap, and won't give any
interesting signal except "someone corrupted this object from
non-instrumented code".
SLAB_POISON doesn't crash on simple tests, but I am not sure there are
no corner cases which I haven't checked, so I thought it's safer to
disable it.
As I said before, we can make SLAB_STORE_USER use stackdepot in a
later CL, thus we disable it now.
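To sketch the idea (assuming the names stay close to what the KASAN
code already uses for SLAB; the actual CL may well differ), the stored
track becomes a pid plus a stack depot handle instead of raw addresses:

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
};

static depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	/* Identical stacks are deduplicated inside the depot. */
	return depot_save_stack(&trace, flags);
}

static void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}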

> And sometimes POISON/REDZONE might actually be useful. KASAN doesn't catch everything;
> e.g. corruption may happen in assembly code, or via DMA by some device.
>
>
>>  - change the freelist hook so that parts of the freelist can be put into
>>    the quarantine.
>>
>> Signed-off-by: Alexander Potapenko <glider@google.com>
>> ---
>
> ...
>
>> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
>> index fb87923..8c75953 100644
>> --- a/mm/kasan/kasan.h
>> +++ b/mm/kasan/kasan.h
>> @@ -110,7 +110,7 @@ static inline bool kasan_report_enabled(void)
>>  void kasan_report(unsigned long addr, size_t size,
>>               bool is_write, unsigned long ip);
>>
>> -#ifdef CONFIG_SLAB
>> +#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
>>  void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
>>  void quarantine_reduce(void);
>>  void quarantine_remove_cache(struct kmem_cache *cache);
>> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
>> index 4973505..89259c2 100644
>> --- a/mm/kasan/quarantine.c
>> +++ b/mm/kasan/quarantine.c
>> @@ -149,7 +149,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
>>
>>       local_irq_save(flags);
>>       alloc_info->state = KASAN_STATE_FREE;
>> +#ifdef CONFIG_SLAB
>>       ___cache_free(cache, object, _THIS_IP_);
>> +#elif defined(CONFIG_SLUB)
>> +     do_slab_free(cache, virt_to_head_page(object), object, NULL, 1,
>> +             _RET_IP_);
>> +#endif
>
> Please, add some simple wrapper instead of this.
>
>>       local_irq_restore(flags);
>>  }
>>
Done. I've reused ___cache_free().
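Roughly like this (just a sketch, assuming do_slab_free() keeps the
signature from this patch; the final version may differ):

#ifdef CONFIG_SLUB
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
	/* Hand the object from the quarantine back to SLUB's free path. */
	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
}
#endif

With that, qlink_free() can call ___cache_free() unconditionally for
both SLAB and SLUB.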
>
> ...
>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 825ff45..f023dd4 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -191,7 +191,11 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
>>  #define MAX_OBJS_PER_PAGE    32767 /* since page.objects is u15 */
>>
>>  /* Internal SLUB flags */
>> +#ifndef CONFIG_KASAN
>>  #define __OBJECT_POISON              0x80000000UL /* Poison object */
>> +#else
>> +#define __OBJECT_POISON              0x00000000UL /* Disable object poisoning */
>> +#endif
>>  #define __CMPXCHG_DOUBLE     0x40000000UL /* Use cmpxchg_double */
>>
>>  #ifdef CONFIG_SMP
>> @@ -454,10 +458,8 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
>>   */
>>  #if defined(CONFIG_SLUB_DEBUG_ON)
>>  static int slub_debug = DEBUG_DEFAULT_FLAGS;
>> -#elif defined(CONFIG_KASAN)
>> -static int slub_debug = SLAB_STORE_USER;
>>  #else
>> -static int slub_debug;
>> +static int slub_debug = SLAB_STORE_USER;
>
> Huh! So now it is on!? By default, and for everyone!
>
Good catch, thanks!
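Since this patch already defines SLAB_STORE_USER to zero under KASAN,
the special case can presumably just be dropped, restoring the old
default (a sketch):

#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif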


-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Managing directors: Matthew Scott Sucherman, Paul Terence Manicle
Registration court and number: Hamburg, HRB 86891
Registered office: Hamburg


* Re: [PATCH v3] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  2016-06-17 14:27   ` Alexander Potapenko
@ 2016-06-17 15:12     ` Andrey Ryabinin
  2016-06-17 18:21       ` Alexander Potapenko
  0 siblings, 1 reply; 5+ messages in thread
From: Andrey Ryabinin @ 2016-06-17 15:12 UTC
  To: Alexander Potapenko
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML



On 06/17/2016 05:27 PM, Alexander Potapenko wrote:
> On Wed, Jun 15, 2016 at 6:50 PM, Andrey Ryabinin
> <aryabinin@virtuozzo.com> wrote:
>>
>>
>> On 06/15/2016 06:26 PM, Alexander Potapenko wrote:
>>> For KASAN builds:
>>>  - switch SLUB allocator to using stackdepot instead of storing the
>>>    allocation/deallocation stacks in the objects;
>>>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>>>    effectively disabling these debug features, as they're redundant in
>>>    the presence of KASAN;
>>
>> So, why do we forbid these? If a user wants to set these, why not? If you don't want them, just don't turn them on, that's it.
> SLAB_RED_ZONE simply doesn't work with KASAN.

Why? This sounds like a bug.

> With additional effort it may work, but I don't think we really need
> that. Extra red zones will just bloat the heap, and won't give any
> interesting signal except "someone corrupted this object from
> non-instrumented code".
> SLAB_POISON doesn't crash on simple tests, but I am not sure there are
> no corner cases which I haven't checked, so I thought it's safer to
> disable it.
> As I said before, we can make SLAB_STORE_USER use stackdepot in a
> later CL, thus we disable it now.
> 

This doesn't explain why we need this. What problem are you trying to solve with this? And why is it OK to silently ignore user requests?
You think that these options are redundant, I get it. Well, then just don't turn them on.
But when a user requests something, he expects that request to be fulfilled, not just ignored.

>> And sometimes POISON/REDZONE might actually be useful. KASAN doesn't catch everything;
>> e.g. corruption may happen in assembly code, or via DMA by some device.
>>
>>


* Re: [PATCH v3] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
From: Alexander Potapenko @ 2016-06-17 18:21 UTC
  To: Andrey Ryabinin
  Cc: Andrey Konovalov, Christoph Lameter, Dmitriy Vyukov,
	Andrew Morton, Steven Rostedt, Joonsoo Kim, Joonsoo Kim,
	Kostya Serebryany, Kuthonuzo Luruo, kasan-dev,
	Linux Memory Management List, LKML

On Fri, Jun 17, 2016 at 5:12 PM, Andrey Ryabinin
<aryabinin@virtuozzo.com> wrote:
>
>
> On 06/17/2016 05:27 PM, Alexander Potapenko wrote:
>> On Wed, Jun 15, 2016 at 6:50 PM, Andrey Ryabinin
>> <aryabinin@virtuozzo.com> wrote:
>>>
>>>
>>> On 06/15/2016 06:26 PM, Alexander Potapenko wrote:
>>>> For KASAN builds:
>>>>  - switch SLUB allocator to using stackdepot instead of storing the
>>>>    allocation/deallocation stacks in the objects;
>>>>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>>>>    effectively disabling these debug features, as they're redundant in
>>>>    the presence of KASAN;
>>>
>>> So, why do we forbid these? If a user wants to set these, why not? If you don't want them, just don't turn them on, that's it.
>> SLAB_RED_ZONE simply doesn't work with KASAN.
>
> Why? This sounds like a bug.
I'm looking into it now. There are some issues with the left redzone being
added, which messes up the offsets.
I'd say it's no surprise that different debugging tools do not work
together; e.g. KASAN and kmemcheck are not expected to.
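For reference, a rough picture of the clash (simplified; the exact
layout details may differ):

  SLUB + SLAB_RED_ZONE:  | left redzone | object | right redzone | ... |
  KASAN (this patch):    | object | alloc meta | free meta | redzone  |

With the left redzone, the object pointer SLUB hands out no longer
coincides with the start of the allocated area, so the
alloc_meta_offset/free_meta_offset values computed in
kasan_cache_create() end up pointing at the wrong place.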
>> With additional effort it may work, but I don't think we really need
>> that. Extra red zones will just bloat the heap, and won't give any
>> interesting signal except "someone corrupted this object from
>> non-instrumented code".
>> SLAB_POISON doesn't crash on simple tests, but I am not sure there are
>> no corner cases which I haven't checked, so I thought it's safer to
>> disable it.
>> As I said before, we can make SLAB_STORE_USER use stackdepot in a
>> later CL, thus we disable it now.
>>
>
> This doesn't explain why we need this. What problem are you trying to solve with this? And why is it OK to silently ignore user requests?
Agreed, there's no point in redefining the flag constants.
> You think that these options are redundant, I get it. Well, then just don't turn them on.
> But when a user requests something, he expects that request to be fulfilled, not just ignored.
Yes, I'd better just document the incompatibility between the
different operation modes (if I don't solve the problem).
>>> And sometimes POISON/REDZONE might actually be useful. KASAN doesn't catch everything;
>>> e.g. corruption may happen in assembly code, or via DMA by some device.
>>>
>>>



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Managing directors: Matthew Scott Sucherman, Paul Terence Manicle
Registration court and number: Hamburg, HRB 86891
Registered office: Hamburg
