* [PATCH] mm, mempool: kasan: poison mempool elements
@ 2015-04-03 14:47 Andrey Ryabinin
  2015-04-03 22:07 ` Andrew Morton
  2015-04-04  2:06 ` David Rientjes
  0 siblings, 2 replies; 4+ messages in thread
From: Andrey Ryabinin @ 2015-04-03 14:47 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andrey Ryabinin, David Rientjes, Dave Kleikamp,
	Christoph Hellwig, Sebastian Ott, Mikulas Patocka,
	Catalin Marinas, LKML, linux-mm, jfs-discussion,
	Dmitry Chernenkov, Dmitry Vyukov, Alexander Potapenko

Mempools keep allocated objects in reserve for situations
when an ordinary allocation cannot be satisfied.
These objects shouldn't be accessed before they leave
the pool.
This patch poisons elements when they enter the pool
and unpoisons them when they leave it. This lets KASan
detect use-after-free of mempool elements.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
---
 include/linux/kasan.h |  2 ++
 mm/kasan/kasan.c      | 13 +++++++++++++
 mm/mempool.c          | 23 +++++++++++++++++++++++
 3 files changed, 38 insertions(+)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5bb0744..5486d77 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size);
 void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
 void kasan_krealloc(const void *object, size_t new_size);
 
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
 static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size) {}
 static inline void kasan_krealloc(const void *object, size_t new_size) {}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 936d816..6c513a6 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -389,6 +389,19 @@ void kasan_krealloc(const void *object, size_t size)
 		kasan_kmalloc(page->slab_cache, object, size);
 }
 
+void kasan_kfree(void *ptr)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+				KASAN_FREE_PAGE);
+	else
+		kasan_slab_free(page->slab_cache, ptr);
+}
+
 void kasan_kfree_large(const void *ptr)
 {
 	struct page *page = virt_to_page(ptr);
diff --git a/mm/mempool.c b/mm/mempool.c
index acd597f..f884e24 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -11,6 +11,7 @@
 
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/kasan.h>
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
@@ -100,10 +101,31 @@ static inline void poison_element(mempool_t *pool, void *element)
 }
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_free(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_alloc(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_krealloc(element, (size_t)pool->pool_data);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
+
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
 	poison_element(pool, element);
+	kasan_poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
@@ -113,6 +135,7 @@ static void *remove_element(mempool_t *pool)
 
 	BUG_ON(pool->curr_nr < 0);
 	check_element(pool, element);
+	kasan_unpoison_element(pool, element);
 	return element;
 }
 
-- 
2.3.5
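
A minimal usage sketch of the kind of bug the patch makes visible; the cache
name, pool size and the final access are illustrative only, not taken from the
patch:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: "buf_cache" and the sizes below are made up. */
static int mempool_uaf_example(struct kmem_cache *buf_cache)
{
	mempool_t *pool;
	void *elem;

	pool = mempool_create_slab_pool(4, buf_cache);
	if (!pool)
		return -ENOMEM;

	elem = mempool_alloc(pool, GFP_KERNEL);
	/* ... use elem, then give it back ... */
	mempool_free(elem, pool);

	/*
	 * If the element landed back in the pool's reserve it now sits
	 * there poisoned, so KASan reports the access below as a
	 * use-after-free instead of letting it silently corrupt a
	 * reserved element.
	 */
	memset(elem, 0xff, 8);

	mempool_destroy(pool);
	return 0;
}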



* Re: [PATCH] mm, mempool: kasan: poison mempool elements
  2015-04-03 14:47 [PATCH] mm, mempool: kasan: poison mempool elements Andrey Ryabinin
@ 2015-04-03 22:07 ` Andrew Morton
  2015-04-06 11:36   ` Andrey Ryabinin
  2015-04-04  2:06 ` David Rientjes
  1 sibling, 1 reply; 4+ messages in thread
From: Andrew Morton @ 2015-04-03 22:07 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: David Rientjes, Dave Kleikamp, Christoph Hellwig, Sebastian Ott,
	Mikulas Patocka, Catalin Marinas, LKML, linux-mm, jfs-discussion,
	Dmitry Chernenkov, Dmitry Vyukov, Alexander Potapenko

On Fri, 03 Apr 2015 17:47:47 +0300 Andrey Ryabinin <a.ryabinin@samsung.com> wrote:

> Mempools keep allocated objects in reserve for situations
> when an ordinary allocation cannot be satisfied.
> These objects shouldn't be accessed before they leave
> the pool.
> This patch poisons elements when they enter the pool
> and unpoisons them when they leave it. This lets KASan
> detect use-after-free of mempool elements.
> 
> ...
>
> +static void kasan_poison_element(mempool_t *pool, void *element)
> +{
> +	if (pool->alloc == mempool_alloc_slab)
> +		kasan_slab_free(pool->pool_data, element);
> +	if (pool->alloc == mempool_kmalloc)
> +		kasan_kfree(element);
> +	if (pool->alloc == mempool_alloc_pages)
> +		kasan_free_pages(element, (unsigned long)pool->pool_data);
> +}

We recently discovered that mempool pages (from alloc_pages, not slab)
can be in highmem.  But kasan appears to handle highmem pages (by
bailing out) so we should be OK with that.

Can kasan be taught to use kmap_atomic() or is it more complicated than
that?  It probably isn't worthwhile - highmem pages don't get used by the
kernel much and most bugs will be found using 64-bit testing anyway.


* Re: [PATCH] mm, mempool: kasan: poison mempool elements
  2015-04-03 14:47 [PATCH] mm, mempool: kasan: poison mempool elements Andrey Ryabinin
  2015-04-03 22:07 ` Andrew Morton
@ 2015-04-04  2:06 ` David Rientjes
  1 sibling, 0 replies; 4+ messages in thread
From: David Rientjes @ 2015-04-04  2:06 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Andrew Morton, Dave Kleikamp, Christoph Hellwig, Sebastian Ott,
	Mikulas Patocka, Catalin Marinas, LKML, linux-mm, jfs-discussion,
	Dmitry Chernenkov, Dmitry Vyukov, Alexander Potapenko

On Fri, 3 Apr 2015, Andrey Ryabinin wrote:

> Mempools keep allocated objects in reserve for situations
> when an ordinary allocation cannot be satisfied.
> These objects shouldn't be accessed before they leave
> the pool.
> This patch poisons elements when they enter the pool
> and unpoisons them when they leave it. This lets KASan
> detect use-after-free of mempool elements.
> 
> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>

Tested-by: David Rientjes <rientjes@google.com>


* Re: [PATCH] mm, mempool: kasan: poison mempool elements
  2015-04-03 22:07 ` Andrew Morton
@ 2015-04-06 11:36   ` Andrey Ryabinin
  0 siblings, 0 replies; 4+ messages in thread
From: Andrey Ryabinin @ 2015-04-06 11:36 UTC (permalink / raw)
  To: Andrew Morton
  Cc: David Rientjes, Dave Kleikamp, Christoph Hellwig, Sebastian Ott,
	Mikulas Patocka, Catalin Marinas, LKML, linux-mm, jfs-discussion,
	Dmitry Chernenkov, Dmitry Vyukov, Alexander Potapenko

On 04/04/2015 01:07 AM, Andrew Morton wrote:
> On Fri, 03 Apr 2015 17:47:47 +0300 Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> 
>> Mempools keep allocated objects in reserve for situations
>> when an ordinary allocation cannot be satisfied.
>> These objects shouldn't be accessed before they leave
>> the pool.
>> This patch poisons elements when they enter the pool
>> and unpoisons them when they leave it. This lets KASan
>> detect use-after-free of mempool elements.
>>
>> ...
>>
>> +static void kasan_poison_element(mempool_t *pool, void *element)
>> +{
>> +	if (pool->alloc == mempool_alloc_slab)
>> +		kasan_slab_free(pool->pool_data, element);
>> +	if (pool->alloc == mempool_kmalloc)
>> +		kasan_kfree(element);
>> +	if (pool->alloc == mempool_alloc_pages)
>> +		kasan_free_pages(element, (unsigned long)pool->pool_data);
>> +}
> 
> We recently discovered that mempool pages (from alloc_pages, not slab)
> can be in highmem.  But kasan appears to handle highmem pages (by
> bailing out) so we should be OK with that.
> 
> Can kasan be taught to use kmap_atomic() or is it more complicated than
> that?  It probably isn't worthwhile - highmem pages don't get used by the
> kernel much and most bugs will be found using 64-bit testing anyway.
> 

kasan can only tell whether it's OK to use some virtual address or not,
so it can't be used to catch a use-after-free of a highmem page:
if a highmem page was kmapped at some address, then it's OK to dereference that address.
However, kasan could be used to unpoison/poison addresses on kmap/kunmap to find use-after-kunmap bugs.
AFAIK kunmap has some sort of lazy unmap logic, and a kunmapped page might still be accessible for some time.

Another idea - poison lowmem pages if they were allocated with __GFP_HIGHMEM, unpoison them only on kmap, and poison them again on kunmap.
Generally such pages shouldn't be accessed without mapping them first.
However, there might be some false positives: a user could check that the page is in lowmem and skip kmap in that case.
It probably isn't worthwhile either - 32-bit testing will find these bugs without kasan.
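
Roughly, that kmap/kunmap idea could look like the sketch below.
kasan_unpoison_page()/kasan_poison_page() are hypothetical helpers, not
existing kasan API - they stand for flipping the page's shadow between
accessible and poisoned:

#include <linux/highmem.h>
#include <linux/page-flags.h>

/*
 * Hypothetical sketch only: kasan_unpoison_page()/kasan_poison_page()
 * don't exist, they stand for marking the page's lowmem alias
 * accessible/inaccessible in shadow memory.
 */
static void *kmap_kasan(struct page *page)
{
	if (!PageHighMem(page))
		kasan_unpoison_page(page);	/* lowmem alias may be used while mapped */
	return kmap(page);
}

static void kunmap_kasan(struct page *page)
{
	kunmap(page);
	if (!PageHighMem(page))
		kasan_poison_page(page);	/* catch use-after-kunmap through the alias */
}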

