From: "Luruo, Kuthonuzo" <kuthonuzo.luruo@hpe.com>
To: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: "glider@google.com" <glider@google.com>,
	"dvyukov@google.com" <dvyukov@google.com>,
	"cl@linux.com" <cl@linux.com>,
	"penberg@kernel.org" <penberg@kernel.org>,
	"rientjes@google.com" <rientjes@google.com>,
	"iamjoonsoo.kim@lge.com" <iamjoonsoo.kim@lge.com>,
	"akpm@linux-foundation.org" <akpm@linux-foundation.org>,
	"kasan-dev@googlegroups.com" <kasan-dev@googlegroups.com>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"ynorov@caviumnetworks.com" <ynorov@caviumnetworks.com>
Subject: RE: [PATCH v5 1/2] mm, kasan: improve double-free detection
Date: Tue, 14 Jun 2016 06:46:04 +0000
Message-ID: <DF4PR84MB00893B1ADEC50ED3E4891FC6F4540@DF4PR84MB0089.NAMPRD84.PROD.OUTLOOK.COM> (raw)
In-Reply-To: <575AF2D9.7030701@virtuozzo.com>

> > Next time, when/if you send a patch series, send the patches in one thread,
> > i.e. patches should be replies to the cover letter.
> > Your patches are not linked together, which makes them harder to track.

Thanks for the tip; but doesn't this conflict with the advice in
https://www.kernel.org/doc/Documentation/SubmittingPatches, specifically the
use of the "summary phrase" in each patch subject?
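
(If I understand the workflow correctly, the two are actually compatible:
each patch keeps its own summary phrase, and threading only chains the mails
under the cover letter. A sketch of the invocation -- the -v5/-2 values and
the shallow threading style here are just illustrative for this series:

  git format-patch --cover-letter --thread=shallow -v5 -2
  git send-email v5-*.patch

git send-email can also thread by itself when the sendemail.thread config
is set.)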

> >
> >
> >> Currently, KASAN may fail to detect concurrent deallocations of the same
> >> object due to a race in kasan_slab_free(). This patch makes double-free
> >> detection more reliable by serializing access to KASAN object metadata.
> >> New functions kasan_meta_lock() and kasan_meta_unlock() are provided to
> >> lock/unlock per-object metadata. Double-free errors are now reported via
> >> kasan_report().
> >>
> >> Per-object lock concept from suggestion/observations by Dmitry Vyukov.
> >>
> >
> >
> > So, I still don't like this; it's way too hacky and complex.

I don't think the patch is particularly complex, but I respect your judgment.
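
(For reference, the core of the v5 per-object locking concept is roughly the
following. This is a simplified sketch, not the patch itself: the lock-bit
name and helper shapes are illustrative, assuming a spare bit in the 32-bit
alloc-meta state word and the usual <linux/atomic.h> primitives.

  #define KASAN_META_LOCK	(1U << 31)

  /* Spin until the lock bit is installed; returns the pre-lock state. */
  static u32 kasan_meta_lock(u32 *state)
  {
  	u32 old;

  	for (;;) {
  		old = READ_ONCE(*state) & ~KASAN_META_LOCK;
  		/* cmpxchg() is a full barrier, so success also acquires. */
  		if (cmpxchg(state, old, old | KASAN_META_LOCK) == old)
  			return old;
  		cpu_relax();
  	}
  }

  /* Publish the new state and drop the lock in a single release store. */
  static void kasan_meta_unlock(u32 *state, u32 new)
  {
  	smp_store_release(state, new & ~KASAN_META_LOCK);
  }

With the lock held across the state check and transition, two concurrent
kfree() calls cannot both observe KASAN_STATE_ALLOC, which is what makes the
double-free check reliable.)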

> > I have some thoughts about how to make this lockless and robust enough.
> > I'll try to sort this out tomorrow.
> >
> 
> 
> So, something like this should work.
> Tested very briefly.
> 
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index ac4b3c4..8691142 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -75,6 +75,8 @@ struct kasan_cache {
>  int kasan_module_alloc(void *addr, size_t size);
>  void kasan_free_shadow(const struct vm_struct *vm);
> 
> +void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
> +
>  size_t ksize(const void *);
>  static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
> 
> @@ -102,6 +104,9 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
>  static inline void kasan_poison_object_data(struct kmem_cache *cache,
>  					void *object) {}
> 
> +static inline void kasan_init_slab_obj(struct kmem_cache *cache,
> +				const void *object) { }
> +
>  static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
>  static inline void kasan_kfree_large(const void *ptr) {}
>  static inline void kasan_poison_kfree(void *ptr) {}
> diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
> index 6845f92..ab0fded 100644
> --- a/mm/kasan/kasan.c
> +++ b/mm/kasan/kasan.c
> @@ -388,11 +388,9 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
>  	*size += sizeof(struct kasan_alloc_meta);
> 
>  	/* Add free meta. */
> -	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
> -	    cache->object_size < sizeof(struct kasan_free_meta)) {
> -		cache->kasan_info.free_meta_offset = *size;
> -		*size += sizeof(struct kasan_free_meta);
> -	}
> +	cache->kasan_info.free_meta_offset = *size;
> +	*size += sizeof(struct kasan_free_meta);
> +
>  	redzone_adjust = optimal_redzone(cache->object_size) -
>  		(*size - cache->object_size);
>  	if (redzone_adjust > 0)
> @@ -431,13 +429,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
>  	kasan_poison_shadow(object,
>  			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
>  			KASAN_KMALLOC_REDZONE);
> -#ifdef CONFIG_SLAB
> -	if (cache->flags & SLAB_KASAN) {
> -		struct kasan_alloc_meta *alloc_info =
> -			get_alloc_info(cache, object);
> -		alloc_info->state = KASAN_STATE_INIT;
> -	}
> -#endif
>  }
> 
>  #ifdef CONFIG_SLAB
> @@ -501,6 +492,20 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
>  	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
>  	return (void *)object + cache->kasan_info.free_meta_offset;
>  }
> +
> +void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
> +{
> +	struct kasan_alloc_meta *alloc_info;
> +	struct kasan_free_meta *free_info;
> +
> +	if (!(cache->flags & SLAB_KASAN))
> +		return;
> +
> +	alloc_info = get_alloc_info(cache, object);
> +	free_info = get_free_info(cache, object);
> +	__memset(alloc_info, 0, sizeof(*alloc_info));
> +	__memset(free_info, 0, sizeof(*free_info));
> +}
>  #endif
> 
>  void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
> @@ -523,37 +528,47 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
>  bool kasan_slab_free(struct kmem_cache *cache, void *object)
>  {
>  #ifdef CONFIG_SLAB
> +	struct kasan_free_meta *free_info = get_free_info(cache, object);
> +	struct kasan_track new_free_stack, old_free_stack;
> +	s8 old_shadow;
> +
>  	/* RCU slabs could be legally used after free within the RCU period */
>  	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
>  		return false;
> 
> -	if (likely(cache->flags & SLAB_KASAN)) {
> -		struct kasan_alloc_meta *alloc_info =
> -			get_alloc_info(cache, object);
> -		struct kasan_free_meta *free_info =
> -			get_free_info(cache, object);
> -
> -		switch (alloc_info->state) {
> -		case KASAN_STATE_ALLOC:
> -			alloc_info->state = KASAN_STATE_QUARANTINE;
> -			quarantine_put(free_info, cache);
> -			set_track(&free_info->track, GFP_NOWAIT);
> -			kasan_poison_slab_free(cache, object);
> -			return true;
> -		case KASAN_STATE_QUARANTINE:
> -		case KASAN_STATE_FREE:
> -			pr_err("Double free");
> -			dump_stack();
> -			break;
> -		default:
> -			break;
> -		}
> +	if (unlikely(!(cache->flags & SLAB_KASAN)))
> +		return false;
> +
> +	set_track(&new_free_stack, GFP_NOWAIT);
> +	old_free_stack = xchg(&free_info->track, new_free_stack);
> +	old_shadow = xchg((s8 *)kasan_mem_to_shadow(object),
> +			KASAN_KMALLOC_FREE);
> +
> +	if (old_shadow < 0 || old_shadow >= KASAN_SHADOW_SCALE_SIZE) {
> +		struct kasan_track free_stack;
> +
> +		/* Paired with xchg() above */
> +		free_stack = smp_load_acquire(&free_info->track);
> +
> +		/*
> +	 * If we didn't race with another instance of kasan_slab_free(),
> +	 * the previous free stack is supposed to be in old_free_stack.
> +	 * Otherwise, free_stack will contain the stack trace of another
> +	 * kfree() call.
> +		 */
> +		if (free_stack.id == new_free_stack.id)
> +			free_stack = old_free_stack;
> +
> +		kasan_report_double_free(cache, object,
> +					free_stack, old_shadow);
> +		return false;
>  	}
> -	return false;
> -#else
>  	kasan_poison_slab_free(cache, object);
> -	return false;
> +	return true;
> +
>  #endif
> +	kasan_poison_slab_free(cache, object);
> +	return false;
>  }
> 
>  void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
> @@ -581,7 +596,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  		struct kasan_alloc_meta *alloc_info =
>  			get_alloc_info(cache, object);
> 
> -		alloc_info->state = KASAN_STATE_ALLOC;
>  		alloc_info->alloc_size = size;
>  		set_track(&alloc_info->track, flags);
>  	}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index fb87923..9b46d2e 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -59,24 +59,21 @@ struct kasan_global {
>   * Structures to keep alloc and free tracks *
>   */
> 
> -enum kasan_state {
> -	KASAN_STATE_INIT,
> -	KASAN_STATE_ALLOC,
> -	KASAN_STATE_QUARANTINE,
> -	KASAN_STATE_FREE
> -};
> -
>  #define KASAN_STACK_DEPTH 64
> 
>  struct kasan_track {
> -	u32 pid;
> -	depot_stack_handle_t stack;
> +	union {
> +		struct {
> +			u32 pid;
> +			depot_stack_handle_t stack;
> +		};
> +		u64 id;
> +	};
>  };
> 
>  struct kasan_alloc_meta {
>  	struct kasan_track track;
> -	u32 state : 2;	/* enum kasan_state */
> -	u32 alloc_size : 30;
> +	u32 alloc_size;
>  };
> 
>  struct qlist_node {
> @@ -109,6 +106,9 @@ static inline bool kasan_report_enabled(void)
> 
>  void kasan_report(unsigned long addr, size_t size,
>  		bool is_write, unsigned long ip);
> +void kasan_report_double_free(struct kmem_cache *cache, void *object,
> +			struct kasan_track free_stack, s8 shadow);
> +
> 
>  #ifdef CONFIG_SLAB
>  void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
> index 4973505..3ec039c 100644
> --- a/mm/kasan/quarantine.c
> +++ b/mm/kasan/quarantine.c
> @@ -144,11 +144,9 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
>  static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
>  {
>  	void *object = qlink_to_object(qlink, cache);
> -	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
>  	unsigned long flags;
> 
>  	local_irq_save(flags);
> -	alloc_info->state = KASAN_STATE_FREE;
>  	___cache_free(cache, object, _THIS_IP_);
>  	local_irq_restore(flags);
>  }
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index b3c122d..a0f4519 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -140,28 +140,13 @@ static void object_err(struct kmem_cache *cache, struct page *page,
>  	pr_err("Object at %p, in cache %s\n", object, cache->name);
>  	if (!(cache->flags & SLAB_KASAN))
>  		return;
> -	switch (alloc_info->state) {
> -	case KASAN_STATE_INIT:
> -		pr_err("Object not allocated yet\n");
> -		break;
> -	case KASAN_STATE_ALLOC:
> -		pr_err("Object allocated with size %u bytes.\n",
> -		       alloc_info->alloc_size);
> -		pr_err("Allocation:\n");
> -		print_track(&alloc_info->track);
> -		break;
> -	case KASAN_STATE_FREE:
> -	case KASAN_STATE_QUARANTINE:
> -		pr_err("Object freed, allocated with size %u bytes\n",
> -		       alloc_info->alloc_size);
> -		free_info = get_free_info(cache, object);
> -		pr_err("Allocation:\n");
> -		print_track(&alloc_info->track);
> -		pr_err("Deallocation:\n");
> -		print_track(&free_info->track);
> -		break;
> -	}
> +	free_info = get_free_info(cache, object);
> +	pr_err("Allocation:\n");
> +	print_track(&alloc_info->track);
> +	pr_err("Deallocation:\n");
> +	print_track(&free_info->track);
>  }
> +
>  #endif
> 
>  static void print_address_description(struct kasan_access_info *info)
> @@ -245,17 +230,31 @@ static void print_shadow_for_address(const void *addr)
> 
>  static DEFINE_SPINLOCK(report_lock);
> 
> -static void kasan_report_error(struct kasan_access_info *info)
> +static void kasan_start_report(unsigned long *flags)
>  {
> -	unsigned long flags;
> -	const char *bug_type;
> -
>  	/*
>  	 * Make sure we don't end up in loop.
>  	 */
>  	kasan_disable_current();
> -	spin_lock_irqsave(&report_lock, flags);
> +	spin_lock_irqsave(&report_lock, *flags);
>  	pr_err("==================================================================\n");
> +}
> +
> +static void kasan_end_report(unsigned long *flags)
> +{
> +	pr_err("==================================================================\n");
> +	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
> +	spin_unlock_irqrestore(&report_lock, *flags);
> +	kasan_enable_current();
> +}
> +
> +static void kasan_report_error(struct kasan_access_info *info)
> +{
> +	unsigned long flags;
> +	const char *bug_type;
> +
> +	kasan_start_report(&flags);
> +
>  	if (info->access_addr <
>  			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
>  		if ((unsigned long)info->access_addr < PAGE_SIZE)
> @@ -276,10 +275,29 @@ static void kasan_report_error(struct kasan_access_info *info)
>  		print_address_description(info);
>  		print_shadow_for_address(info->first_bad_addr);
>  	}
> -	pr_err("==================================================================\n");
> -	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
> -	spin_unlock_irqrestore(&report_lock, flags);
> -	kasan_enable_current();
> +
> +	kasan_end_report(&flags);
> +}
> +
> +void kasan_report_double_free(struct kmem_cache *cache, void *object,
> +			struct kasan_track free_stack, s8 shadow)
> +{
> +	unsigned long flags;
> +
> +	kasan_start_report(&flags);
> +
> +	pr_err("BUG: Double free or corrupt pointer\n");
> +	pr_err("Unexpected shadow byte: 0x%hhX\n", shadow);
> +
> +	dump_stack();
> +	pr_err("Object at %p, in cache %s\n", object, cache->name);
> +	pr_err("Allocation:\n");
> +	print_track(&get_alloc_info(cache, object)->track);
> +	pr_err("Deallocation:\n");
> +	print_track(&free_stack);
> +
> +	kasan_end_report(&flags);
>  }
> 
>  void kasan_report(unsigned long addr, size_t size,
> diff --git a/mm/slab.c b/mm/slab.c
> index 763096a..65c942b 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -2604,9 +2604,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
>  	}
> 
>  	for (i = 0; i < cachep->num; i++) {
> +		objp = index_to_obj(cachep, page, i);
> +		kasan_init_slab_obj(cachep, objp);
> +
>  		/* constructor could break poison info */
>  		if (DEBUG == 0 && cachep->ctor) {
> -			objp = index_to_obj(cachep, page, i);
>  			kasan_unpoison_object_data(cachep, objp);
>  			cachep->ctor(objp);
>  			kasan_poison_object_data(cachep, objp);

Nice hack & novel approach. It does have the flaw that subsequent error
reports for the object will have a bogus deallocation track, since the new
free stack is xchg()'d into the metadata before the shadow byte is checked.
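
To spell it out (hypothetical timeline, not kernel code):

  /*
   *  kfree(p)  -> track = stack A, shadow byte -> KASAN_KMALLOC_FREE
   *  kfree(p)  -> track = stack B via the unconditional xchg(); old
   *               shadow is FREE, so the double-free is reported
   *               (correctly, with stack A recovered from old_free_stack)
   *  *p        -> any later use-after-free report prints stack B, the
   *               double-free site, as the "Deallocation" track
   */

A lib/test_kasan.c-style sequence -- kmalloc(), kfree(), kfree(), then a
read from the object -- would show it: the final report's deallocation track
points at the second kfree() rather than the first.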

Kuthonuzo


Thread overview: 10+ messages
2016-06-07 18:03 [PATCH v5 1/2] mm, kasan: improve double-free detection Kuthonuzo Luruo
2016-06-09 13:32 ` Alexander Potapenko
2016-06-09 16:54   ` Luruo, Kuthonuzo
2016-06-09 17:00 ` Andrey Ryabinin
2016-06-10 17:03   ` Andrey Ryabinin
2016-06-10 17:09     ` Dmitry Vyukov
2016-06-15 16:18       ` Andrey Ryabinin
2016-06-13 11:56     ` Alexander Potapenko
2016-06-14  6:46     ` Luruo, Kuthonuzo [this message]
2016-06-15 16:38       ` Andrey Ryabinin
