From: Daniel Axtens <dja@axtens.net>
To: kasan-dev@googlegroups.com, linux-mm@kvack.org, x86@kernel.org,
	aryabinin@virtuozzo.com, glider@google.com,
	linux-kernel@vger.kernel.org, dvyukov@google.com
Cc: Qian Cai <cai@lca.pw>
Subject: Re: [PATCH] kasan: support vmalloc backing of vm_map_ram()
Date: Wed, 04 Dec 2019 23:01:02 +1100
Message-ID: <87h82ge1vl.fsf@dja-thinkpad.axtens.net>
In-Reply-To: <20191129154519.30964-1-dja@axtens.net>

I've realised this throws a few compile warnings; I'll respin it.

Daniel Axtens <dja@axtens.net> writes:

> This fixes crashes in xfs, binder and the i915 mock_selftests under
> KASAN vmalloc, where no shadow space was being allocated when
> vm_map_ram() was called.
>
> vm_map_ram() has two paths: one that uses a vmap_block and one that
> uses alloc_vmap_area(). The alloc_vmap_area() path is straightforward;
> we handle it like most other allocations, as sketched below.
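>
> For illustration, a condensed sketch of the flow this patch sets up in
> vm_map_ram() (error handling trimmed; see the diff below for the real
> code):
>
>	void *vm_map_ram(struct page **pages, unsigned int count, int node,
>			 pgprot_t prot)
>	{
>		unsigned long size = count << PAGE_SHIFT;
>		void *mem;
>
>		if (likely(count <= VMAP_MAX_ALLOC)) {
>			/* vmap_block path: shadow for the whole block was
>			 * already populated in new_vmap_block() */
>			mem = vb_alloc(size, GFP_KERNEL);
>		} else {
>			struct vmap_area *va;
>
>			va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START,
>					     VMALLOC_END, node, GFP_KERNEL);
>			mem = (void *)va->va_start;
>			/* populate shadow for just this allocation */
>			if (kasan_populate_vmalloc_area(size, mem))
>				return NULL;
>		}
>		kasan_unpoison_shadow(mem, size);
>		/* ... vmap_page_range() as before ... */
>		return mem;
>	}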
>
> For the vmap_block case, we map shadow for the entire vmap_block when
> the block is allocated, and unpoison it piecewise in vm_map_ram(). The
> shadow already gets cleaned up when the block is released via the lazy
> vmap area freeing path.
>
> For both cases, we need to tweak the interface to allow for vmalloc
> addresses that don't have an attached vm_struct.
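>
> For reference, a minimal sketch of the kind of caller that used to
> crash (hypothetical driver code, assuming an already-populated pages[]
> array; error handling trimmed):
>
>	/* Before this patch, the memset() below faulted under
>	 * CONFIG_KASAN_VMALLOC: vm_map_ram() handed back an address
>	 * with no shadow memory behind it. */
>	void *p = vm_map_ram(pages, nr_pages, NUMA_NO_NODE, PAGE_KERNEL);
>	if (p) {
>		memset(p, 0, nr_pages * PAGE_SIZE);
>		vm_unmap_ram(p, nr_pages);
>	}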
>
> Reported-by: Dmitry Vyukov <dvyukov@google.com>
> Cc: Qian Cai <cai@lca.pw>
> Thanks-to: Andrey Ryabinin <aryabinin@virtuozzo.com>
> Signed-off-by: Daniel Axtens <dja@axtens.net>
> ---
>  include/linux/kasan.h |  6 ++++++
>  mm/kasan/common.c     | 37 +++++++++++++++++++++++--------------
>  mm/vmalloc.c          | 24 ++++++++++++++++++++++++
>  3 files changed, 53 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 4f404c565db1..0b50b59a8ff5 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -207,6 +207,7 @@ static inline void *kasan_reset_tag(const void *addr)
>  #ifdef CONFIG_KASAN_VMALLOC
>  int kasan_populate_vmalloc(unsigned long requested_size,
>  			   struct vm_struct *area);
> +int kasan_populate_vmalloc_area(unsigned long size, void *addr);
>  void kasan_poison_vmalloc(void *start, unsigned long size);
>  void kasan_release_vmalloc(unsigned long start, unsigned long end,
>  			   unsigned long free_region_start,
> @@ -218,6 +219,11 @@ static inline int kasan_populate_vmalloc(unsigned long requested_size,
>  	return 0;
>  }
>  
> +static inline int kasan_populate_vmalloc_area(unsigned long size, void *addr)
> +{
> +	return 0;
> +}
> +
>  static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
>  static inline void kasan_release_vmalloc(unsigned long start,
>  					 unsigned long end,
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index df3371d5c572..27d8522ffaad 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -779,27 +779,15 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
>  
>  int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
>  {
> -	unsigned long shadow_start, shadow_end;
>  	int ret;
> -
> -	shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
> -	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
> -	shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
> -							area->size);
> -	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
> -
> -	ret = apply_to_page_range(&init_mm, shadow_start,
> -				  shadow_end - shadow_start,
> -				  kasan_populate_vmalloc_pte, NULL);
> +	ret = kasan_populate_vmalloc_area(area->size, area->addr);
>  	if (ret)
>  		return ret;
>  
> -	flush_cache_vmap(shadow_start, shadow_end);
> +	area->flags |= VM_KASAN;
>  
>  	kasan_unpoison_shadow(area->addr, requested_size);
>  
> -	area->flags |= VM_KASAN;
> -
>  	/*
>  	 * We need to be careful about inter-cpu effects here. Consider:
>  	 *
> @@ -838,6 +826,27 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
>  	return 0;
>  }
>  
> +int kasan_populate_vmalloc_area(unsigned long size, void *addr)
> +{
> +	unsigned long shadow_start, shadow_end;
> +	int ret;
> +
> +	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
> +	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
> +	shadow_end = (unsigned long)kasan_mem_to_shadow(addr + size);
> +	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
> +
> +	ret = apply_to_page_range(&init_mm, shadow_start,
> +				  shadow_end - shadow_start,
> +				  kasan_populate_vmalloc_pte, NULL);
> +	if (ret)
> +		return ret;
> +
> +	flush_cache_vmap(shadow_start, shadow_end);
> +
> +	return 0;
> +}
> +
>  /*
>   * Poison the shadow for a vmalloc region. Called as part of the
>   * freeing process at the time the region is freed.
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index bf030516258c..2896189e351f 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1509,6 +1509,13 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
>  		return ERR_CAST(va);
>  	}
>  
> +	err = kasan_populate_vmalloc_area(VMAP_BLOCK_SIZE, va->va_start);
> +	if (unlikely(err)) {
> +		kfree(vb);
> +		free_vmap_area(va);
> +		return ERR_PTR(err);
> +	}
> +
>  	err = radix_tree_preload(gfp_mask);
>  	if (unlikely(err)) {
>  		kfree(vb);
> @@ -1554,6 +1561,7 @@ static void free_vmap_block(struct vmap_block *vb)
>  	spin_unlock(&vmap_block_tree_lock);
>  	BUG_ON(tmp != vb);
>  
> +	/* free_vmap_area will take care of freeing the shadow */
>  	free_vmap_area_noflush(vb->va);
>  	kfree_rcu(vb, rcu_head);
>  }
> @@ -1780,6 +1788,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
>  	if (likely(count <= VMAP_MAX_ALLOC)) {
>  		debug_check_no_locks_freed(mem, size);
>  		vb_free(mem, size);
> +		kasan_poison_vmalloc(mem, size);
> +
>  		return;
>  	}
>  
> @@ -1787,6 +1797,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
>  	BUG_ON(!va);
>  	debug_check_no_locks_freed((void *)va->va_start,
>  				    (va->va_end - va->va_start));
> +	/* vmap area purging will clean up the KASAN shadow later */
>  	free_unmap_vmap_area(va);
>  }
>  EXPORT_SYMBOL(vm_unmap_ram);
> @@ -1817,6 +1828,11 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
>  		if (IS_ERR(mem))
>  			return NULL;
>  		addr = (unsigned long)mem;
> +
> +		/*
> +		 * We don't need to call kasan_populate_vmalloc_area here, as
> +		 * it's done at block allocation time.
> +		 */
>  	} else {
>  		struct vmap_area *va;
>  		va = alloc_vmap_area(size, PAGE_SIZE,
> @@ -1826,7 +1842,15 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
>  
>  		addr = va->va_start;
>  		mem = (void *)addr;
> +
> +		if (kasan_populate_vmalloc_area(size, mem)) {
> +			vm_unmap_ram(mem, count);
> +			return NULL;
> +		}
>  	}
> +
> +	kasan_unpoison_shadow(mem, size);
> +
>  	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
>  		vm_unmap_ram(mem, count);
>  		return NULL;
> -- 
> 2.20.1


