All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-02-23 15:53 ` Andrey Konovalov
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-02-23 15:53 UTC (permalink / raw)
  To: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Andrey Ryabinin, Alexander Potapenko,
	Dmitry Vyukov, linux-mm, linux-kernel, kasan-dev
  Cc: Kostya Serebryany, Andrey Konovalov

The kasan_slab_free hook's return value denotes whether the reuse of a
slab object must be delayed (e.g. when the object is put into memory
quarantine).

The current way SLUB handles this hook is by ignoring its return value
and hardcoding checks similar (but not exactly the same) to the ones
performed in kasan_slab_free, which is prone to making mistakes.

This patch changes the way SLUB handles this by:
1. taking into account the return value of kasan_slab_free for each of
   the objects that are being freed;
2. reconstructing the freelist of objects to exclude the ones whose
   reuse must be delayed.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/slub.c | 52 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e381728a3751..f111c2a908b9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1362,10 +1362,8 @@ static __always_inline void kfree_hook(void *x)
 	kasan_kfree_large(x, _RET_IP_);
 }
 
-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
-	void *freeptr;
-
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1385,17 +1383,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
-	freeptr = get_freepointer(s, x);
-	/*
-	 * kasan_slab_free() may put x into memory quarantine, delaying its
-	 * reuse. In this case the object's freelist pointer is changed.
-	 */
-	kasan_slab_free(s, x, _RET_IP_);
-	return freeptr;
+	/* KASAN might put x into memory quarantine, delaying its reuse */
+	return kasan_slab_free(s, x, _RET_IP_);
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
-					   void *head, void *tail)
+					   void **head, void **tail)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1406,13 +1399,29 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
 	defined(CONFIG_KASAN)
 
-	void *object = head;
-	void *tail_obj = tail ? : head;
-	void *freeptr;
+	void *object;
+	void *next = *head;
+	void *old_tail = *tail ? *tail : *head;
+
+	/* Head and tail of the reconstructed freelist */
+	*head = NULL;
+	*tail = NULL;
 
 	do {
-		freeptr = slab_free_hook(s, object);
-	} while ((object != tail_obj) && (object = freeptr));
+		object = next;
+		next = get_freepointer(s, object);
+		/* If object's reuse doesn't have to be delayed */
+		if (!slab_free_hook(s, object)) {
+			/* Move object to the new freelist */
+			set_freepointer(s, object, *head);
+			*head = object;
+			if (!*tail)
+				*tail = object;
+		}
+	} while (object != old_tail);
+
+	if (*head == *tail)
+		*tail = NULL;
 #endif
 }
 
@@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 				      void *head, void *tail, int cnt,
 				      unsigned long addr)
 {
-	slab_free_freelist_hook(s, head, tail);
 	/*
-	 * slab_free_freelist_hook() could have put the items into quarantine.
-	 * If so, no need to free them.
+	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
+	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
-		return;
-	do_slab_free(s, page, head, tail, cnt, addr);
+	slab_free_freelist_hook(s, &head, &tail);
+	if (head != NULL)
+		do_slab_free(s, page, head, tail, cnt, addr);
 }
 
 #ifdef CONFIG_KASAN
-- 
2.16.1.291.g4437f3f132-goog

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-02-23 15:53 ` Andrey Konovalov
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-02-23 15:53 UTC (permalink / raw)
  To: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Andrey Ryabinin, Alexander Potapenko,
	Dmitry Vyukov, linux-mm, linux-kernel, kasan-dev
  Cc: Kostya Serebryany, Andrey Konovalov

The kasan_slab_free hook's return value denotes whether the reuse of a
slab object must be delayed (e.g. when the object is put into memory
quarantine).

The current way SLUB handles this hook is by ignoring its return value
and hardcoding checks similar (but not exactly the same) to the ones
performed in kasan_slab_free, which is prone to making mistakes.

This patch changes the way SLUB handles this by:
1. taking into account the return value of kasan_slab_free for each of
   the objects that are being freed;
2. reconstructing the freelist of objects to exclude the ones whose
   reuse must be delayed.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/slub.c | 52 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e381728a3751..f111c2a908b9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1362,10 +1362,8 @@ static __always_inline void kfree_hook(void *x)
 	kasan_kfree_large(x, _RET_IP_);
 }
 
-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
-	void *freeptr;
-
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1385,17 +1383,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
-	freeptr = get_freepointer(s, x);
-	/*
-	 * kasan_slab_free() may put x into memory quarantine, delaying its
-	 * reuse. In this case the object's freelist pointer is changed.
-	 */
-	kasan_slab_free(s, x, _RET_IP_);
-	return freeptr;
+	/* KASAN might put x into memory quarantine, delaying its reuse */
+	return kasan_slab_free(s, x, _RET_IP_);
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
-					   void *head, void *tail)
+					   void **head, void **tail)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1406,13 +1399,29 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
 	defined(CONFIG_KASAN)
 
-	void *object = head;
-	void *tail_obj = tail ? : head;
-	void *freeptr;
+	void *object;
+	void *next = *head;
+	void *old_tail = *tail ? *tail : *head;
+
+	/* Head and tail of the reconstructed freelist */
+	*head = NULL;
+	*tail = NULL;
 
 	do {
-		freeptr = slab_free_hook(s, object);
-	} while ((object != tail_obj) && (object = freeptr));
+		object = next;
+		next = get_freepointer(s, object);
+		/* If object's reuse doesn't have to be delayed */
+		if (!slab_free_hook(s, object)) {
+			/* Move object to the new freelist */
+			set_freepointer(s, object, *head);
+			*head = object;
+			if (!*tail)
+				*tail = object;
+		}
+	} while (object != old_tail);
+
+	if (*head == *tail)
+		*tail = NULL;
 #endif
 }
 
@@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 				      void *head, void *tail, int cnt,
 				      unsigned long addr)
 {
-	slab_free_freelist_hook(s, head, tail);
 	/*
-	 * slab_free_freelist_hook() could have put the items into quarantine.
-	 * If so, no need to free them.
+	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
+	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
-		return;
-	do_slab_free(s, page, head, tail, cnt, addr);
+	slab_free_freelist_hook(s, &head, &tail);
+	if (head != NULL)
+		do_slab_free(s, page, head, tail, cnt, addr);
 }
 
 #ifdef CONFIG_KASAN
-- 
2.16.1.291.g4437f3f132-goog

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
  2018-02-23 15:53 ` Andrey Konovalov
@ 2018-03-02 12:10   ` Andrey Ryabinin
  -1 siblings, 0 replies; 10+ messages in thread
From: Andrey Ryabinin @ 2018-03-02 12:10 UTC (permalink / raw)
  To: Andrey Konovalov, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Andrew Morton, Alexander Potapenko,
	Dmitry Vyukov, linux-mm, linux-kernel, kasan-dev
  Cc: Kostya Serebryany

On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
> The kasan_slab_free hook's return value denotes whether the reuse of a
> slab object must be delayed (e.g. when the object is put into memory
> qurantine).
> 
> The current way SLUB handles this hook is by ignoring its return value
> and hardcoding checks similar (but not exactly the same) to the ones
> performed in kasan_slab_free, which is prone to making mistakes.
> 

What are those differences exactly? And what problems do they cause?
Answers to these questions should be in the changelog.


> This patch changes the way SLUB handles this by:
> 1. taking into account the return value of kasan_slab_free for each of
>    the objects, that are being freed;
> 2. reconstructing the freelist of objects to exclude the ones, whose
>    reuse must be delayed.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---




>  
> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>  				      void *head, void *tail, int cnt,
>  				      unsigned long addr)
>  {
> -	slab_free_freelist_hook(s, head, tail);
>  	/*
> -	 * slab_free_freelist_hook() could have put the items into quarantine.
> -	 * If so, no need to free them.
> +	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
> +	 * to remove objects, whose reuse must be delayed.
>  	 */
> -	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
> -		return;
> -	do_slab_free(s, page, head, tail, cnt, addr);
> +	slab_free_freelist_hook(s, &head, &tail);
> +	if (head != NULL)

That's an additional branch in non-debug fast-path. Find a way to avoid this.


> +		do_slab_free(s, page, head, tail, cnt, addr);
>  }
>  
>  #ifdef CONFIG_KASAN
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-03-02 12:10   ` Andrey Ryabinin
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Ryabinin @ 2018-03-02 12:10 UTC (permalink / raw)
  To: Andrey Konovalov, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Andrew Morton, Alexander Potapenko,
	Dmitry Vyukov, linux-mm, linux-kernel, kasan-dev
  Cc: Kostya Serebryany

On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
> The kasan_slab_free hook's return value denotes whether the reuse of a
> slab object must be delayed (e.g. when the object is put into memory
> qurantine).
> 
> The current way SLUB handles this hook is by ignoring its return value
> and hardcoding checks similar (but not exactly the same) to the ones
> performed in kasan_slab_free, which is prone to making mistakes.
> 

What are those differences exactly? And what problems do they cause?
Answers to these questions should be in the changelog.


> This patch changes the way SLUB handles this by:
> 1. taking into account the return value of kasan_slab_free for each of
>    the objects, that are being freed;
> 2. reconstructing the freelist of objects to exclude the ones, whose
>    reuse must be delayed.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---




>  
> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>  				      void *head, void *tail, int cnt,
>  				      unsigned long addr)
>  {
> -	slab_free_freelist_hook(s, head, tail);
>  	/*
> -	 * slab_free_freelist_hook() could have put the items into quarantine.
> -	 * If so, no need to free them.
> +	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
> +	 * to remove objects, whose reuse must be delayed.
>  	 */
> -	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
> -		return;
> -	do_slab_free(s, page, head, tail, cnt, addr);
> +	slab_free_freelist_hook(s, &head, &tail);
> +	if (head != NULL)

That's an additional branch in non-debug fast-path. Find a way to avoid this.


> +		do_slab_free(s, page, head, tail, cnt, addr);
>  }
>  
>  #ifdef CONFIG_KASAN
> 

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
  2018-03-02 12:10   ` Andrey Ryabinin
@ 2018-03-06 17:42     ` Andrey Konovalov
  -1 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-03-06 17:42 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany

On Fri, Mar 2, 2018 at 1:10 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
> On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
>> The kasan_slab_free hook's return value denotes whether the reuse of a
>> slab object must be delayed (e.g. when the object is put into memory
>> qurantine).
>>
>> The current way SLUB handles this hook is by ignoring its return value
>> and hardcoding checks similar (but not exactly the same) to the ones
>> performed in kasan_slab_free, which is prone to making mistakes.
>>
>
> What are those differences exactly? And what problems do they cause?
> Answers to these questions should be in the changelog.


The difference is that with the old code we end up proceeding with
invalidly freeing an object when an invalid-free (or double-free) is
detected. Will add this in v2.

>
>
>> This patch changes the way SLUB handles this by:
>> 1. taking into account the return value of kasan_slab_free for each of
>>    the objects, that are being freed;
>> 2. reconstructing the freelist of objects to exclude the ones, whose
>>    reuse must be delayed.
>>
>> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>> ---
>
>
>
>
>>
>> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>>                                     void *head, void *tail, int cnt,
>>                                     unsigned long addr)
>>  {
>> -     slab_free_freelist_hook(s, head, tail);
>>       /*
>> -      * slab_free_freelist_hook() could have put the items into quarantine.
>> -      * If so, no need to free them.
>> +      * With KASAN enabled slab_free_freelist_hook modifies the freelist
>> +      * to remove objects, whose reuse must be delayed.
>>        */
>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>> -             return;
>> -     do_slab_free(s, page, head, tail, cnt, addr);
>> +     slab_free_freelist_hook(s, &head, &tail);
>> +     if (head != NULL)
>
> That's an additional branch in non-debug fast-path. Find a way to avoid this.

Hm, there is supposed to be a branch here. We either have objects that we
need to free, or we don't, and we need to do different things in those
cases. Previously this was done with a hardcoded "if (s->flags &
SLAB_KASAN && ..." statement, now it's a different "if (head !=
NULL)".

I could put this check under #ifdef CONFIG_KASAN if the performance is
critical here, but I'm not sure if that's the best solution. I could
also add an "unlikely()" there.

>
>
>> +             do_slab_free(s, page, head, tail, cnt, addr);
>>  }
>>
>>  #ifdef CONFIG_KASAN
>>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-03-06 17:42     ` Andrey Konovalov
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-03-06 17:42 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany

On Fri, Mar 2, 2018 at 1:10 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
> On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
>> The kasan_slab_free hook's return value denotes whether the reuse of a
>> slab object must be delayed (e.g. when the object is put into memory
>> qurantine).
>>
>> The current way SLUB handles this hook is by ignoring its return value
>> and hardcoding checks similar (but not exactly the same) to the ones
>> performed in kasan_slab_free, which is prone to making mistakes.
>>
>
> What are those differences exactly? And what problems do they cause?
> Answers to these questions should be in the changelog.


The difference is that with the old code we end up proceeding with
invalidly freeing an object when an invalid-free (or double-free) is
detected. Will add this in v2.

>
>
>> This patch changes the way SLUB handles this by:
>> 1. taking into account the return value of kasan_slab_free for each of
>>    the objects, that are being freed;
>> 2. reconstructing the freelist of objects to exclude the ones, whose
>>    reuse must be delayed.
>>
>> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>> ---
>
>
>
>
>>
>> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>>                                     void *head, void *tail, int cnt,
>>                                     unsigned long addr)
>>  {
>> -     slab_free_freelist_hook(s, head, tail);
>>       /*
>> -      * slab_free_freelist_hook() could have put the items into quarantine.
>> -      * If so, no need to free them.
>> +      * With KASAN enabled slab_free_freelist_hook modifies the freelist
>> +      * to remove objects, whose reuse must be delayed.
>>        */
>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>> -             return;
>> -     do_slab_free(s, page, head, tail, cnt, addr);
>> +     slab_free_freelist_hook(s, &head, &tail);
>> +     if (head != NULL)
>
> That's an additional branch in non-debug fast-path. Find a way to avoid this.

Hm, there is supposed to be a branch here. We either have objects that we
need to free, or we don't, and we need to do different things in those
cases. Previously this was done with a hardcoded "if (s->flags &
SLAB_KASAN && ..." statement, now it's a different "if (head !=
NULL)".

I could put this check under #ifdef CONFIG_KASAN if the performance is
critical here, but I'm not sure if that's the best solution. I could
also add an "unlikely()" there.

>
>
>> +             do_slab_free(s, page, head, tail, cnt, addr);
>>  }
>>
>>  #ifdef CONFIG_KASAN
>>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
  2018-03-06 17:42     ` Andrey Konovalov
@ 2018-03-06 17:47       ` Andrey Konovalov
  -1 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-03-06 17:47 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany

On Tue, Mar 6, 2018 at 6:42 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
> On Fri, Mar 2, 2018 at 1:10 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
>> On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
>>> The kasan_slab_free hook's return value denotes whether the reuse of a
>>> slab object must be delayed (e.g. when the object is put into memory
>>> qurantine).
>>>
>>> The current way SLUB handles this hook is by ignoring its return value
>>> and hardcoding checks similar (but not exactly the same) to the ones
>>> performed in kasan_slab_free, which is prone to making mistakes.
>>>
>>
>> What are those differences exactly? And what problems do they cause?
>> Answers to these questions should be in the changelog.
>
>
> The difference is that with the old code we end up proceeding with
> invalidly freeing an object when an invalid-free (or double-free) is
> detected. Will add this in v2.
>
>>
>>
>>> This patch changes the way SLUB handles this by:
>>> 1. taking into account the return value of kasan_slab_free for each of
>>>    the objects, that are being freed;
>>> 2. reconstructing the freelist of objects to exclude the ones, whose
>>>    reuse must be delayed.
>>>
>>> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>>> ---
>>
>>
>>
>>
>>>
>>> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>>>                                     void *head, void *tail, int cnt,
>>>                                     unsigned long addr)
>>>  {
>>> -     slab_free_freelist_hook(s, head, tail);
>>>       /*
>>> -      * slab_free_freelist_hook() could have put the items into quarantine.
>>> -      * If so, no need to free them.
>>> +      * With KASAN enabled slab_free_freelist_hook modifies the freelist
>>> +      * to remove objects, whose reuse must be delayed.
>>>        */
>>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>>> -             return;
>>> -     do_slab_free(s, page, head, tail, cnt, addr);
>>> +     slab_free_freelist_hook(s, &head, &tail);
>>> +     if (head != NULL)
>>
>> That's an additional branch in non-debug fast-path. Find a way to avoid this.
>
> Hm, there supposed to be a branch here. We either have objects that we
> need to free, or we don't, and we need to do different things in those
> cases. Previously this was done with a hardcoded "if (s->flags &
> SLAB_KASAN && ..." statement, not it's a different "if (head !=
> NULL)".
>
> I could put this check under #ifdef CONFIG_KASAN if the performance is
> critical here, but I'm not sure if that's the best solution. I could
> also add an "unlikely()" there.

OK, I have a better solution for this; stay tuned for v2.

>
>>
>>
>>> +             do_slab_free(s, page, head, tail, cnt, addr);
>>>  }
>>>
>>>  #ifdef CONFIG_KASAN
>>>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-03-06 17:47       ` Andrey Konovalov
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Konovalov @ 2018-03-06 17:47 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany

On Tue, Mar 6, 2018 at 6:42 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
> On Fri, Mar 2, 2018 at 1:10 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
>> On 02/23/2018 06:53 PM, Andrey Konovalov wrote:
>>> The kasan_slab_free hook's return value denotes whether the reuse of a
>>> slab object must be delayed (e.g. when the object is put into memory
>>> qurantine).
>>>
>>> The current way SLUB handles this hook is by ignoring its return value
>>> and hardcoding checks similar (but not exactly the same) to the ones
>>> performed in kasan_slab_free, which is prone to making mistakes.
>>>
>>
>> What are those differences exactly? And what problems do they cause?
>> Answers to these questions should be in the changelog.
>
>
> The difference is that with the old code we end up proceeding with
> invalidly freeing an object when an invalid-free (or double-free) is
> detected. Will add this in v2.
>
>>
>>
>>> This patch changes the way SLUB handles this by:
>>> 1. taking into account the return value of kasan_slab_free for each of
>>>    the objects, that are being freed;
>>> 2. reconstructing the freelist of objects to exclude the ones, whose
>>>    reuse must be delayed.
>>>
>>> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>>> ---
>>
>>
>>
>>
>>>
>>> @@ -2965,14 +2974,13 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>>>                                     void *head, void *tail, int cnt,
>>>                                     unsigned long addr)
>>>  {
>>> -     slab_free_freelist_hook(s, head, tail);
>>>       /*
>>> -      * slab_free_freelist_hook() could have put the items into quarantine.
>>> -      * If so, no need to free them.
>>> +      * With KASAN enabled slab_free_freelist_hook modifies the freelist
>>> +      * to remove objects, whose reuse must be delayed.
>>>        */
>>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>>> -             return;
>>> -     do_slab_free(s, page, head, tail, cnt, addr);
>>> +     slab_free_freelist_hook(s, &head, &tail);
>>> +     if (head != NULL)
>>
>> That's an additional branch in non-debug fast-path. Find a way to avoid this.
>
> Hm, there supposed to be a branch here. We either have objects that we
> need to free, or we don't, and we need to do different things in those
> cases. Previously this was done with a hardcoded "if (s->flags &
> SLAB_KASAN && ..." statement, not it's a different "if (head !=
> NULL)".
>
> I could put this check under #ifdef CONFIG_KASAN if the performance is
> critical here, but I'm not sure if that's the best solution. I could
> also add an "unlikely()" there.

OK, I have a better solution for this; stay tuned for v2.

>
>>
>>
>>> +             do_slab_free(s, page, head, tail, cnt, addr);
>>>  }
>>>
>>>  #ifdef CONFIG_KASAN
>>>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
  2018-03-06 17:42     ` Andrey Konovalov
@ 2018-03-07 12:43       ` Andrey Ryabinin
  -1 siblings, 0 replies; 10+ messages in thread
From: Andrey Ryabinin @ 2018-03-07 12:43 UTC (permalink / raw)
  To: Andrey Konovalov
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany



On 03/06/2018 08:42 PM, Andrey Konovalov wrote:

>>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>>> -             return;
>>> -     do_slab_free(s, page, head, tail, cnt, addr);
>>> +     slab_free_freelist_hook(s, &head, &tail);
>>> +     if (head != NULL)
>>
>> That's an additional branch in non-debug fast-path. Find a way to avoid this.
> 
> Hm, there supposed to be a branch here. We either have objects that we
> need to free, or we don't, and we need to do different things in those
> cases. Previously this was done with a hardcoded "if (s->flags &
> SLAB_KASAN && ..." statement, not it's a different "if (head !=
> NULL)".
> 

They are different. "if (s->flags & SLAB_KASAN && ..." can be optimized away by compiler when CONFIG_KASAN=n,
"if (head != NULL)" - can not.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] kasan, slub: fix handling of kasan_slab_free hook
@ 2018-03-07 12:43       ` Andrey Ryabinin
  0 siblings, 0 replies; 10+ messages in thread
From: Andrey Ryabinin @ 2018-03-07 12:43 UTC (permalink / raw)
  To: Andrey Konovalov
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Alexander Potapenko, Dmitry Vyukov,
	Linux Memory Management List, LKML, kasan-dev, Kostya Serebryany



On 03/06/2018 08:42 PM, Andrey Konovalov wrote:

>>> -     if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
>>> -             return;
>>> -     do_slab_free(s, page, head, tail, cnt, addr);
>>> +     slab_free_freelist_hook(s, &head, &tail);
>>> +     if (head != NULL)
>>
>> That's an additional branch in non-debug fast-path. Find a way to avoid this.
> 
> Hm, there supposed to be a branch here. We either have objects that we
> need to free, or we don't, and we need to do different things in those
> cases. Previously this was done with a hardcoded "if (s->flags &
> SLAB_KASAN && ..." statement, not it's a different "if (head !=
> NULL)".
> 

They are different. "if (s->flags & SLAB_KASAN && ..." can be optimized away by compiler when CONFIG_KASAN=n,
"if (head != NULL)" - can not.

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2018-03-07 12:44 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-23 15:53 [PATCH] kasan, slub: fix handling of kasan_slab_free hook Andrey Konovalov
2018-02-23 15:53 ` Andrey Konovalov
2018-03-02 12:10 ` Andrey Ryabinin
2018-03-02 12:10   ` Andrey Ryabinin
2018-03-06 17:42   ` Andrey Konovalov
2018-03-06 17:42     ` Andrey Konovalov
2018-03-06 17:47     ` Andrey Konovalov
2018-03-06 17:47       ` Andrey Konovalov
2018-03-07 12:43     ` Andrey Ryabinin
2018-03-07 12:43       ` Andrey Ryabinin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.