linux-mm.kvack.org archive mirror
* [PATCH] mm/vmalloc: unbreak kasan vmalloc support
@ 2021-06-17  8:13 Daniel Axtens
  2021-06-17  8:43 ` David Gow
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Daniel Axtens @ 2021-06-17  8:13 UTC (permalink / raw)
  To: linux-kernel, linux-mm, kasan-dev, akpm
  Cc: Daniel Axtens, Nicholas Piggin, David Gow, Dmitry Vyukov,
	Andrey Konovalov, Uladzislau Rezki

In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
__vmalloc_node_range() was changed such that __get_vm_area_node() was no
longer called with the requested/real size of the vmalloc allocation,
but rather with a rounded-up size.

This means that __get_vm_area_node() called kasan_unpoison_vmalloc()
with a rounded-up size rather than the real size. This led to it
allowing access to too much memory, so vmalloc out-of-bounds accesses
were missed and the KASAN KUnit tests failed.

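To make the hole concrete, here is a userspace sketch of the size
accounting (illustrative only, not kernel code; the 4096-byte page size
and the 3000-byte request are assumptions):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                 /* assumed page size */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long real_size = 3000;      /* size the caller asked for */

        /* After 121e6f3258fe, __vmalloc_node_range() rounded the size
         * up before calling __get_vm_area_node(), so KASAN unpoisoned
         * the rounded size instead of the real size. */
        unsigned long unpoisoned = ALIGN(real_size, PAGE_SIZE);

        printf("requested %lu, unpoisoned %lu, missed OOB window %lu\n",
               real_size, unpoisoned, unpoisoned - real_size);
        return 0;
    }

With huge mappings the rounding is to the huge-page size (typically
2 MiB), so the window KASAN cannot see grows accordingly.
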
Pass the real size and the desired shift into __get_vm_area_node().
This allows it to round up the size for the underlying allocators while
still unpoisoning the correct quantity of shadow memory.

Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
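
For reference, a minimal sketch of the flow after this patch (again
userspace and illustrative; the shift values are assumptions, and the
kernel's actual selection of the shift is not shown):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define PAGE_SHIFT 12                    /* assumed */
    #define PMD_SHIFT  21                    /* assumed */

    /* Mirrors the patched __get_vm_area_node(): round the size up for
     * the allocator, but keep the requested size for KASAN unpoisoning. */
    static void get_vm_area_node(unsigned long size, unsigned long shift)
    {
        unsigned long requested_size = size;

        size = ALIGN(size, 1UL << shift);
        printf("allocator sees %lu bytes, KASAN unpoisons %lu bytes\n",
               size, requested_size);
    }

    int main(void)
    {
        get_vm_area_node(3000, PAGE_SHIFT);   /* regular page path */
        get_vm_area_node(3000000, PMD_SHIFT); /* huge mapping path */
        return 0;
    }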

Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: David Gow <davidgow@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
Signed-off-by: Daniel Axtens <dja@axtens.net>
---
 mm/vmalloc.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index aaad569e8963..3471cbeb083c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2362,15 +2362,16 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long align, unsigned long flags, unsigned long start,
-		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
+		unsigned long align, unsigned long shift, unsigned long flags,
+		unsigned long start, unsigned long end, int node,
+		gfp_t gfp_mask, const void *caller)
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
 	unsigned long requested_size = size;
 
 	BUG_ON(in_interrupt());
-	size = PAGE_ALIGN(size);
+	size = ALIGN(size, 1ul << shift);
 	if (unlikely(!size))
 		return NULL;
 
@@ -2402,8 +2403,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       const void *caller)
 {
-	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
-				  GFP_KERNEL, caller);
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
+				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
 
 /**
@@ -2419,7 +2420,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+				  VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL,
 				  __builtin_return_address(0));
 }
@@ -2427,7 +2429,8 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
 {
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+				  VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
 
@@ -2949,9 +2952,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	}
 
 again:
-	size = PAGE_ALIGN(size);
-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
-				vm_flags, start, end, node, gfp_mask, caller);
+	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+				  VM_UNINITIALIZED | vm_flags, start, end, node,
+				  gfp_mask, caller);
 	if (!area) {
 		warn_alloc(gfp_mask, NULL,
 			"vmalloc error: size %lu, vm_struct allocation failed",
@@ -2970,6 +2973,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	 */
 	clear_vm_uninitialized_flag(area);
 
+	size = PAGE_ALIGN(size);
 	kmemleak_vmalloc(area, size, gfp_mask);
 
 	return addr;
-- 
2.30.2




* Re: [PATCH] mm/vmalloc: unbreak kasan vmalloc support
  2021-06-17  8:13 [PATCH] mm/vmalloc: unbreak kasan vmalloc support Daniel Axtens
@ 2021-06-17  8:43 ` David Gow
  2021-06-17  9:40 ` Nicholas Piggin
  2021-06-20 11:44 ` Andrey Konovalov
  2 siblings, 0 replies; 5+ messages in thread
From: David Gow @ 2021-06-17  8:43 UTC (permalink / raw)
  To: Daniel Axtens
  Cc: Linux Kernel Mailing List, Linux Memory Management List,
	kasan-dev, Andrew Morton, Nicholas Piggin, Dmitry Vyukov,
	Andrey Konovalov, Uladzislau Rezki

On Thu, Jun 17, 2021 at 4:13 PM Daniel Axtens <dja@axtens.net> wrote:
>
> In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> __vmalloc_node_range() was changed such that __get_vm_area_node() was no
> longer called with the requested/real size of the vmalloc allocation,
> but rather with a rounded-up size.
>
> This means that __get_vm_area_node() called kasan_unpoison_vmalloc()
> with a rounded-up size rather than the real size. This led to it
> allowing access to too much memory, so vmalloc out-of-bounds accesses
> were missed and the KASAN KUnit tests failed.
>
> Pass the real size and the desired shift into __get_vm_area_node().
> This allows it to round up the size for the underlying allocators while
> still unpoisoning the correct quantity of shadow memory.
>
> Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: David Gow <davidgow@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
> Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
> Signed-off-by: Daniel Axtens <dja@axtens.net>
> ---

This fixes the KUnit test failure I was seeing on x86_64, thanks!

Tested-by: David Gow <davidgow@google.com>

Cheers,
-- David



* Re: [PATCH] mm/vmalloc: unbreak kasan vmalloc support
  2021-06-17  8:13 [PATCH] mm/vmalloc: unbreak kasan vmalloc support Daniel Axtens
  2021-06-17  8:43 ` David Gow
@ 2021-06-17  9:40 ` Nicholas Piggin
  2021-06-19 13:02   ` Uladzislau Rezki
  2021-06-20 11:44 ` Andrey Konovalov
  2 siblings, 1 reply; 5+ messages in thread
From: Nicholas Piggin @ 2021-06-17  9:40 UTC (permalink / raw)
  To: akpm, Daniel Axtens, kasan-dev, linux-kernel, linux-mm
  Cc: Andrey Konovalov, David Gow, Dmitry Vyukov, Uladzislau Rezki

Excerpts from Daniel Axtens's message of June 17, 2021 6:13 pm:
> In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> __vmalloc_node_range() was changed such that __get_vm_area_node() was no
> longer called with the requested/real size of the vmalloc allocation,
> but rather with a rounded-up size.
>
> This means that __get_vm_area_node() called kasan_unpoison_vmalloc()
> with a rounded-up size rather than the real size. This led to it
> allowing access to too much memory, so vmalloc out-of-bounds accesses
> were missed and the KASAN KUnit tests failed.
>
> Pass the real size and the desired shift into __get_vm_area_node().
> This allows it to round up the size for the underlying allocators while
> still unpoisoning the correct quantity of shadow memory.
> 
> Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
> 
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: David Gow <davidgow@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
> Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")

Thanks Daniel, good debugging.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

> Signed-off-by: Daniel Axtens <dja@axtens.net>
> ---
> [... diff snipped; identical to the patch above ...]



* Re: [PATCH] mm/vmalloc: unbreak kasan vmalloc support
  2021-06-17  9:40 ` Nicholas Piggin
@ 2021-06-19 13:02   ` Uladzislau Rezki
  0 siblings, 0 replies; 5+ messages in thread
From: Uladzislau Rezki @ 2021-06-19 13:02 UTC (permalink / raw)
  To: Daniel Axtens
  Cc: akpm, Daniel Axtens, kasan-dev, linux-kernel, linux-mm,
	Andrey Konovalov, David Gow, Dmitry Vyukov, Uladzislau Rezki

On Thu, Jun 17, 2021 at 07:40:49PM +1000, Nicholas Piggin wrote:
> Excerpts from Daniel Axtens's message of June 17, 2021 6:13 pm:
> > In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> > __vmalloc_node_range() was changed such that __get_vm_area_node() was no
> > longer called with the requested/real size of the vmalloc allocation,
> > but rather with a rounded-up size.
> >
> > This means that __get_vm_area_node() called kasan_unpoison_vmalloc()
> > with a rounded-up size rather than the real size. This led to it
> > allowing access to too much memory, so vmalloc out-of-bounds accesses
> > were missed and the KASAN KUnit tests failed.
> >
> > Pass the real size and the desired shift into __get_vm_area_node().
> > This allows it to round up the size for the underlying allocators while
> > still unpoisoning the correct quantity of shadow memory.
> > 
> > Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
> > 
> > Cc: Nicholas Piggin <npiggin@gmail.com>
> > Cc: David Gow <davidgow@google.com>
> > Cc: Dmitry Vyukov <dvyukov@google.com>
> > Cc: Andrey Konovalov <andreyknvl@gmail.com>
> > Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
> > Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
> 
> Thanks Daniel, good debugging.
> 
> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
> 
> > Signed-off-by: Daniel Axtens <dja@axtens.net>
> > ---
> > [... diff snipped; identical to the patch above ...]

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

Indeed, hugepage mappings were broken with regard to KASAN.

Thanks!

--
Vlad Rezki



* Re: [PATCH] mm/vmalloc: unbreak kasan vmalloc support
  2021-06-17  8:13 [PATCH] mm/vmalloc: unbreak kasan vmalloc support Daniel Axtens
  2021-06-17  8:43 ` David Gow
  2021-06-17  9:40 ` Nicholas Piggin
@ 2021-06-20 11:44 ` Andrey Konovalov
  2 siblings, 0 replies; 5+ messages in thread
From: Andrey Konovalov @ 2021-06-20 11:44 UTC (permalink / raw)
  To: Daniel Axtens
  Cc: LKML, Linux Memory Management List, kasan-dev, Andrew Morton,
	Nicholas Piggin, David Gow, Dmitry Vyukov, Uladzislau Rezki

On Thu, Jun 17, 2021 at 11:13 AM Daniel Axtens <dja@axtens.net> wrote:
>
> In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> __vmalloc_node_range() was changed such that __get_vm_area_node() was no
> longer called with the requested/real size of the vmalloc allocation,
> but rather with a rounded-up size.
>
> This means that __get_vm_area_node() called kasan_unpoison_vmalloc()
> with a rounded-up size rather than the real size. This led to it
> allowing access to too much memory, so vmalloc out-of-bounds accesses
> were missed and the KASAN KUnit tests failed.
>
> Pass the real size and the desired shift into __get_vm_area_node().
> This allows it to round up the size for the underlying allocators while
> still unpoisoning the correct quantity of shadow memory.
>
> Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: David Gow <davidgow@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
> Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
> Signed-off-by: Daniel Axtens <dja@axtens.net>
> ---
> [... diff snipped; identical to the patch above ...]

This fixes the vmalloc_oob test for me. Thank you, Daniel!

Tested-by: Andrey Konovalov <andreyknvl@gmail.com>
Acked-by: Andrey Konovalov <andreyknvl@gmail.com>



Thread overview: 5 messages
2021-06-17  8:13 [PATCH] mm/vmalloc: unbreak kasan vmalloc support Daniel Axtens
2021-06-17  8:43 ` David Gow
2021-06-17  9:40 ` Nicholas Piggin
2021-06-19 13:02   ` Uladzislau Rezki
2021-06-20 11:44 ` Andrey Konovalov