linux-mm.kvack.org archive mirror
* [PATCH] kasan: support vmalloc backing of vm_map_ram()
@ 2019-11-29 15:45 Daniel Axtens
  2019-12-04 12:01 ` Daniel Axtens
  2019-12-04 20:44 ` Andrey Ryabinin
  0 siblings, 2 replies; 3+ messages in thread
From: Daniel Axtens @ 2019-11-29 15:45 UTC (permalink / raw)
  To: kasan-dev, linux-mm, x86, aryabinin, glider, linux-kernel, dvyukov
  Cc: Daniel Axtens, Qian Cai

This fixes crashes in xfs, binder and the i915 mock_selftests under
KASAN vmalloc, where no shadow space was allocated when vm_map_ram()
was called.
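
(Background: generic KASAN maps every 8 bytes of memory to one shadow
byte, and the compiler-inserted checks read that shadow on every
access. The translation is roughly:

	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

With CONFIG_KASAN_VMALLOC the shadow for the vmalloc region is only
populated on demand, so an access through a vm_map_ram() mapping whose
shadow was never populated faults in the shadow lookup itself - hence
the crashes.)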

vm_map_ram() has two paths: one that uses a vmap_block and one that
uses alloc_vmap_area(). The alloc_vmap_area() path is straightforward:
we handle it like most other vmalloc allocations.

For the vmap_block case, we map a shadow for the entire vmap_block
when the block is allocated, and unpoison it piecewise in vm_map_ram().
The shadow already gets cleaned up when the block is released via the
lazy vmap area freeing path.
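
The resulting shadow lifecycle for the vmap_block path is roughly
(a sketch of the calls added in the diff below, not exact code):

	new_vmap_block()
		kasan_populate_vmalloc_area(VMAP_BLOCK_SIZE, ...)	/* back shadow for the whole block */
	vm_map_ram()	/* small allocations */
		kasan_unpoison_shadow(mem, size)	/* mark just this mapping valid */
	vm_unmap_ram()
		vb_free(mem, size); kasan_poison_vmalloc(mem, size)	/* re-poison on unmap */
	free_vmap_block()
		free_vmap_area_noflush(vb->va)	/* shadow released later by kasan_release_vmalloc() */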

For both cases, we need to tweak the interface to allow for vmalloc
addresses that don't have an attached vm_struct.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Qian Cai <cai@lca.pw>
Thanks-to: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Daniel Axtens <dja@axtens.net>
---
 include/linux/kasan.h |  6 ++++++
 mm/kasan/common.c     | 37 +++++++++++++++++++++++--------------
 mm/vmalloc.c          | 24 ++++++++++++++++++++++++
 3 files changed, 53 insertions(+), 14 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 4f404c565db1..0b50b59a8ff5 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -207,6 +207,7 @@ static inline void *kasan_reset_tag(const void *addr)
 #ifdef CONFIG_KASAN_VMALLOC
 int kasan_populate_vmalloc(unsigned long requested_size,
 			   struct vm_struct *area);
+int kasan_populate_vmalloc_area(unsigned long size, void *addr);
 void kasan_poison_vmalloc(void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
@@ -218,6 +219,11 @@ static inline int kasan_populate_vmalloc(unsigned long requested_size,
 	return 0;
 }
 
+static inline int kasan_populate_vmalloc_area(unsigned long size, void *addr)
+{
+	return 0;
+}
+
 static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
 static inline void kasan_release_vmalloc(unsigned long start,
 					 unsigned long end,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index df3371d5c572..27d8522ffaad 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -779,27 +779,15 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 
 int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
 {
-	unsigned long shadow_start, shadow_end;
 	int ret;
-
-	shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
-	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
-	shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
-							area->size);
-	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
-
-	ret = apply_to_page_range(&init_mm, shadow_start,
-				  shadow_end - shadow_start,
-				  kasan_populate_vmalloc_pte, NULL);
+	ret = kasan_populate_vmalloc_area(area->size, area->addr);
 	if (ret)
 		return ret;
 
-	flush_cache_vmap(shadow_start, shadow_end);
+	area->flags |= VM_KASAN;
 
 	kasan_unpoison_shadow(area->addr, requested_size);
 
-	area->flags |= VM_KASAN;
-
 	/*
 	 * We need to be careful about inter-cpu effects here. Consider:
 	 *
@@ -838,6 +826,27 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
 	return 0;
 }
 
+int kasan_populate_vmalloc_area(unsigned long size, void *addr)
+{
+	unsigned long shadow_start, shadow_end;
+	int ret;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow(addr + size);
+	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+	ret = apply_to_page_range(&init_mm, shadow_start,
+				  shadow_end - shadow_start,
+				  kasan_populate_vmalloc_pte, NULL);
+	if (ret)
+		return ret;
+
+	flush_cache_vmap(shadow_start, shadow_end);
+
+	return 0;
+}
+
 /*
  * Poison the shadow for a vmalloc region. Called as part of the
  * freeing process at the time the region is freed.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index bf030516258c..2896189e351f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1509,6 +1509,13 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 		return ERR_CAST(va);
 	}
 
+	err = kasan_populate_vmalloc_area(VMAP_BLOCK_SIZE, va->va_start);
+	if (unlikely(err)) {
+		kfree(vb);
+		free_vmap_area(va);
+		return ERR_PTR(err);
+	}
+
 	err = radix_tree_preload(gfp_mask);
 	if (unlikely(err)) {
 		kfree(vb);
@@ -1554,6 +1561,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
+	/* free_vmap_area will take care of freeing the shadow */
 	free_vmap_area_noflush(vb->va);
 	kfree_rcu(vb, rcu_head);
 }
@@ -1780,6 +1788,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		debug_check_no_locks_freed(mem, size);
 		vb_free(mem, size);
+		kasan_poison_vmalloc(mem, size);
+
 		return;
 	}
 
@@ -1787,6 +1797,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(!va);
 	debug_check_no_locks_freed((void *)va->va_start,
 				    (va->va_end - va->va_start));
+	/* vmap area purging will clean up the KASAN shadow later */
 	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
@@ -1817,6 +1828,11 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 		if (IS_ERR(mem))
 			return NULL;
 		addr = (unsigned long)mem;
+
+		/*
+		 * We don't need to call kasan_populate_vmalloc_area here, as
+		 * it's done at block allocation time.
+		 */
 	} else {
 		struct vmap_area *va;
 		va = alloc_vmap_area(size, PAGE_SIZE,
@@ -1826,7 +1842,15 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 
 		addr = va->va_start;
 		mem = (void *)addr;
+
+		if (kasan_populate_vmalloc_area(size, mem)) {
+			vm_unmap_ram(mem, count);
+			return NULL;
+		}
 	}
+
+	kasan_unpoison_shadow(mem, size);
+
 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
-- 
2.20.1




* Re: [PATCH] kasan: support vmalloc backing of vm_map_ram()
  2019-11-29 15:45 [PATCH] kasan: support vmalloc backing of vm_map_ram() Daniel Axtens
@ 2019-12-04 12:01 ` Daniel Axtens
  2019-12-04 20:44 ` Andrey Ryabinin
  1 sibling, 0 replies; 3+ messages in thread
From: Daniel Axtens @ 2019-12-04 12:01 UTC (permalink / raw)
  To: kasan-dev, linux-mm, x86, aryabinin, glider, linux-kernel, dvyukov
  Cc: Qian Cai

I've realised this throws a few compile warnings; I'll respin it.




* Re: [PATCH] kasan: support vmalloc backing of vm_map_ram()
  2019-11-29 15:45 [PATCH] kasan: support vmalloc backing of vm_map_ram() Daniel Axtens
  2019-12-04 12:01 ` Daniel Axtens
@ 2019-12-04 20:44 ` Andrey Ryabinin
  1 sibling, 0 replies; 3+ messages in thread
From: Andrey Ryabinin @ 2019-12-04 20:44 UTC (permalink / raw)
  To: Daniel Axtens, kasan-dev, linux-mm, x86, glider, linux-kernel, dvyukov
  Cc: Qian Cai



On 11/29/19 6:45 PM, Daniel Axtens wrote:
> @@ -1826,7 +1842,15 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
>  
>  		addr = va->va_start;
>  		mem = (void *)addr;
> +
> +		if (kasan_populate_vmalloc_area(size, mem)) {
> +			vm_unmap_ram(mem, count);
> +			return NULL;
> +		}
>  	}
> +
> +	kasan_unpoison_shadow(mem, size);
> +

This is probably going to explode on CONFIG_KASAN=y && CONFIG_KASAN_VMALLOC=n.
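
One minimal way to avoid that (just a sketch, assuming nothing beyond
what's in the patch) would be to only touch the shadow when vmalloc
shadow backing is actually compiled in, e.g.:

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_unpoison_shadow(mem, size);

or to hide the call behind a helper that's stubbed out for
!CONFIG_KASAN_VMALLOC in kasan.h.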

I've sent an alternative patch which fixes vm_map_ram() and also, in my opinion, makes the code a bit easier to follow.

>  	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
>  		vm_unmap_ram(mem, count);
>  		return NULL;
> 


