linux-mm.kvack.org archive mirror
* [PATCH 1/4] mm/kasan: get rid of speculative shadow checks
@ 2017-06-01 16:23 Andrey Ryabinin
  2017-06-01 16:23 ` [PATCH 2/4] x86/kasan: don't allocate extra shadow memory Andrey Ryabinin
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 16:23 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Alexander Potapenko, Dmitry Vyukov, kasan-dev, linux-mm,
	linux-kernel, Andrey Ryabinin

For some unaligned memory accesses we have to check an additional
byte of the shadow memory. Currently we load that byte speculatively
so that the optimistic fast path needs only a single load + branch.

However, this approach has some downsides:
 - The speculative load is unaligned, which prevents porting KASAN to
   architectures that don't support unaligned accesses.
 - We have to map an additional shadow page to prevent a crash if the
   speculative load happens near the end of the mapped shadow memory.
   This would significantly complicate the upcoming memory hotplug support.

I wasn't able to notice any performance degradation with this patch,
so these speculative loads are just pain with no gain; let's remove
them.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/kasan/kasan.c | 98 +++++++++-----------------------------------------------
 1 file changed, 16 insertions(+), 82 deletions(-)

diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 85ee45b07615..e6fe07a98677 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -134,94 +134,30 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
 	return false;
 }
 
-static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+						unsigned long size)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 1))
-			return true;
-
-		/*
-		 * If single shadow byte covers 2-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
 
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_4(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 3))
-			return true;
-
-		/*
-		 * If single shadow byte covers 4-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
-
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_8(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 7))
-			return true;
-
-		/*
-		 * If single shadow byte covers 8-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	/*
+	 * Access crosses 8(shadow size)-byte boundary. Such access maps
+	 * into 2 shadow bytes, so we need to check them both.
+	 */
+	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
-	return false;
+	return memory_is_poisoned_1(addr + size - 1);
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		u16 shadow_first_bytes = *(u16 *)shadow_addr;
-
-		if (unlikely(shadow_first_bytes))
-			return true;
-
-		/*
-		 * If two shadow bytes covers 16-byte access, we don't
-		 * need to do anything more. Otherwise, test the last
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
+	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-		return memory_is_poisoned_1(addr + 15);
-	}
+	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
+	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
-	return false;
+	return *shadow_addr;
 }
 
 static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
@@ -292,11 +228,9 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
 		case 1:
 			return memory_is_poisoned_1(addr);
 		case 2:
-			return memory_is_poisoned_2(addr);
 		case 4:
-			return memory_is_poisoned_4(addr);
 		case 8:
-			return memory_is_poisoned_8(addr);
+			return memory_is_poisoned_2_4_8(addr, size);
 		case 16:
 			return memory_is_poisoned_16(addr);
 		default:
-- 
2.13.0
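
For readers not familiar with the shadow layout, here is a minimal user-space
sketch (not part of the patch) of the boundary-cross test used in
memory_is_poisoned_2_4_8() above. It assumes the usual KASAN_SHADOW_SCALE_SHIFT
of 3 (one shadow byte per 8 bytes of memory); the addresses in main() are made
up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT	3
#define SHADOW_SCALE_SIZE	(1UL << SHADOW_SCALE_SHIFT)
#define SHADOW_MASK		(SHADOW_SCALE_SIZE - 1)

/* Does a 'size'-byte access at 'addr' span two shadow bytes? */
static bool crosses_shadow_byte(unsigned long addr, unsigned long size)
{
	/*
	 * The last byte of the access lands in a different 8-byte granule
	 * than the first one iff its offset within the granule is smaller
	 * than size - 1 -- the same condition the patch tests.
	 */
	return ((addr + size - 1) & SHADOW_MASK) < size - 1;
}

int main(void)
{
	printf("%d\n", crosses_shadow_byte(0x1006, 2));	/* 0: one shadow byte */
	printf("%d\n", crosses_shadow_byte(0x1007, 2));	/* 1: two shadow bytes */
	printf("%d\n", crosses_shadow_byte(0x1004, 8));	/* 1: two shadow bytes */
	return 0;
}

With the speculative load gone, the second shadow byte is read only when this
condition is true, so the checks never touch shadow outside the range that maps
the access itself.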


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/4] x86/kasan: don't allocate extra shadow memory
  2017-06-01 16:23 [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Andrey Ryabinin
@ 2017-06-01 16:23 ` Andrey Ryabinin
  2017-06-01 16:23 ` [PATCH 3/4] arm64/kasan: " Andrey Ryabinin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 16:23 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Alexander Potapenko, Dmitry Vyukov, kasan-dev, linux-mm,
	linux-kernel, Andrey Ryabinin, Thomas Gleixner, Ingo Molnar,
	H. Peter Anvin, x86

We used to read several bytes of the shadow memory in advance, so
additional shadow memory was mapped to prevent a crash if a speculative
load happened near the end of the mapped shadow memory.

Now we don't have such speculative loads, so we no longer need to map
the additional shadow memory.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
---
 arch/x86/mm/kasan_init_64.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0c7d8129bed6..39231a9c981a 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -23,12 +23,7 @@ static int __init map_range(struct range *range)
 	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
 	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-	/*
-	 * end + 1 here is intentional. We check several shadow bytes in advance
-	 * to slightly speed up fastpath. In some rare cases we could cross
-	 * boundary of mapped shadow, so we just map some more here.
-	 */
-	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+	return vmemmap_populate(start, end, NUMA_NO_NODE);
 }
 
 static void __init clear_pgds(unsigned long start,
-- 
2.13.0
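
As a rough user-space model of the address-to-shadow translation involved here
(the constants are the x86_64 values of the time, and the region in main() is
made up), populating shadow exactly up to kasan_mem_to_shadow(end) is enough
once no check reads past the last shadow byte of the range:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET	 0xdffffc0000000000UL	/* x86_64 value, for illustration only */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long start = 0xffff880000000000UL;	/* made-up memory range */
	unsigned long end = start + (128UL << 20);

	/* shadow for [start, end) is exactly [shadow(start), shadow(end)) */
	printf("shadow: [%#lx, %#lx)\n", mem_to_shadow(start), mem_to_shadow(end));
	return 0;
}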


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:23 [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Andrey Ryabinin
  2017-06-01 16:23 ` [PATCH 2/4] x86/kasan: don't allocate extra shadow memory Andrey Ryabinin
@ 2017-06-01 16:23 ` Andrey Ryabinin
  2017-06-01 16:34   ` Mark Rutland
  2017-06-01 16:23 ` [PATCH 4/4] mm/kasan: Add support for memory hotplug Andrey Ryabinin
  2017-06-01 17:45 ` [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Dmitry Vyukov
  3 siblings, 1 reply; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 16:23 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Alexander Potapenko, Dmitry Vyukov, kasan-dev, linux-mm,
	linux-kernel, Andrey Ryabinin, Catalin Marinas, Will Deacon,
	linux-arm-kernel

We used to read several bytes of the shadow memory in advance, so
additional shadow memory was mapped to prevent a crash if a speculative
load happened near the end of the mapped shadow memory.

Now we don't have such speculative loads, so we no longer need to map
the additional shadow memory.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/mm/kasan_init.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 687a358a3733..81f03959a4ab 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -191,14 +191,8 @@ void __init kasan_init(void)
 		if (start >= end)
 			break;
 
-		/*
-		 * end + 1 here is intentional. We check several shadow bytes in
-		 * advance to slightly speed up fastpath. In some rare cases
-		 * we could cross boundary of mapped shadow, so we just map
-		 * some more here.
-		 */
 		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
-				(unsigned long)kasan_mem_to_shadow(end) + 1,
+				(unsigned long)kasan_mem_to_shadow(end),
 				pfn_to_nid(virt_to_pfn(start)));
 	}
 
-- 
2.13.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 4/4] mm/kasan: Add support for memory hotplug
  2017-06-01 16:23 [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Andrey Ryabinin
  2017-06-01 16:23 ` [PATCH 2/4] x86/kasan: don't allocate extra shadow memory Andrey Ryabinin
  2017-06-01 16:23 ` [PATCH 3/4] arm64/kasan: " Andrey Ryabinin
@ 2017-06-01 16:23 ` Andrey Ryabinin
  2017-06-01 17:45 ` [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Dmitry Vyukov
  3 siblings, 0 replies; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 16:23 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Alexander Potapenko, Dmitry Vyukov, kasan-dev, linux-mm,
	linux-kernel, Andrey Ryabinin

KASAN doesn't work with memory hotplug because hotplugged memory
doesn't have any shadow memory, so any access to hotplugged memory
would crash on the shadow check.

Use a memory hotplug notifier to allocate and map shadow memory when
hotplugged memory goes online, and free the shadow after the memory is
offlined.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/Kconfig       |  1 -
 mm/kasan/kasan.c | 40 +++++++++++++++++++++++++++++++++++-----
 2 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index f1fbde17d45d..c8df94059974 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -161,7 +161,6 @@ config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on ARCH_ENABLE_MEMORY_HOTPLUG
-	depends on COMPILE_TEST || !KASAN
 
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index e6fe07a98677..ca11bc4ce205 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -737,17 +737,47 @@ void __asan_unpoison_stack_memory(const void *addr, size_t size)
 EXPORT_SYMBOL(__asan_unpoison_stack_memory);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int kasan_mem_notifier(struct notifier_block *nb,
+static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 			unsigned long action, void *data)
 {
-	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
+	struct memory_notify *mem_data = data;
+	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
+	unsigned long shadow_end, shadow_size;
+
+	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
+	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
+	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
+	shadow_size = nr_shadow_pages << PAGE_SHIFT;
+	shadow_end = shadow_start + shadow_size;
+
+	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
+		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+		return NOTIFY_BAD;
+
+	switch (action) {
+	case MEM_GOING_ONLINE: {
+		void *ret;
+
+		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
+					shadow_end, GFP_KERNEL,
+					PAGE_KERNEL, VM_NO_GUARD,
+					pfn_to_nid(mem_data->start_pfn),
+					__builtin_return_address(0));
+		if (!ret)
+			return NOTIFY_BAD;
+
+		kmemleak_ignore(ret);
+		return NOTIFY_OK;
+	}
+	case MEM_OFFLINE:
+		vfree((void *)shadow_start);
+	}
+
+	return NOTIFY_OK;
 }
 
 static int __init kasan_memhotplug_init(void)
 {
-	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
-	pr_info("Memory hot-add will be disabled\n");
-
 	hotplug_memory_notifier(kasan_mem_notifier, 0);
 
 	return 0;
-- 
2.13.0
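
A back-of-the-envelope check of the shadow sizing done in kasan_mem_notifier()
above, as a standalone user-space sketch (4K pages and scale shift 3 assumed;
the 128 MB block size is just an example):

#include <stdio.h>

#define PAGE_SHIFT		 12
#define KASAN_SHADOW_SCALE_SHIFT 3

int main(void)
{
	unsigned long nr_pages = (128UL << 20) >> PAGE_SHIFT;	/* 32768 pages */
	unsigned long nr_shadow_pages = nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	unsigned long shadow_size = nr_shadow_pages << PAGE_SHIFT;

	/* prints: 4096 shadow pages, 16 MB of shadow */
	printf("%lu shadow pages, %lu MB of shadow\n",
	       nr_shadow_pages, shadow_size >> 20);
	return 0;
}

So hotplugging a memory block costs one eighth of its size in vmalloc-ed
shadow, allocated on MEM_GOING_ONLINE and freed again on MEM_OFFLINE.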


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:23 ` [PATCH 3/4] arm64/kasan: " Andrey Ryabinin
@ 2017-06-01 16:34   ` Mark Rutland
  2017-06-01 16:45     ` Dmitry Vyukov
  0 siblings, 1 reply; 12+ messages in thread
From: Mark Rutland @ 2017-06-01 16:34 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Andrew Morton, Catalin Marinas, Will Deacon, linux-kernel,
	kasan-dev, linux-mm, Alexander Potapenko, linux-arm-kernel,
	Dmitry Vyukov

On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
> We used to read several bytes of the shadow memory in advance.
> Therefore additional shadow memory mapped to prevent crash if
> speculative load would happen near the end of the mapped shadow memory.
>
> Now we don't have such speculative loads, so we no longer need to map
> additional shadow memory.

I see that patch 1 fixed up the Linux helpers for outline
instrumentation.

Just to check, is it also true that the inline instrumentation never
performs unaligned accesses to the shadow memory?

If so, this looks good to me; it also avoids a potential fencepost issue
when memory exists right at the end of the linear map. Assuming that
holds:

Acked-by: Mark Rutland <mark.rutland@arm.com>

Thanks,
Mark.



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:34   ` Mark Rutland
@ 2017-06-01 16:45     ` Dmitry Vyukov
  2017-06-01 16:52       ` Mark Rutland
  0 siblings, 1 reply; 12+ messages in thread
From: Dmitry Vyukov @ 2017-06-01 16:45 UTC (permalink / raw)
  To: Mark Rutland
  Cc: Andrey Ryabinin, Andrew Morton, Catalin Marinas, Will Deacon,
	LKML, kasan-dev, linux-mm, Alexander Potapenko, linux-arm-kernel

On Thu, Jun 1, 2017 at 6:34 PM, Mark Rutland <mark.rutland@arm.com> wrote:
> On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
>> We used to read several bytes of the shadow memory in advance.
>> Therefore additional shadow memory mapped to prevent crash if
>> speculative load would happen near the end of the mapped shadow memory.
>>
>> Now we don't have such speculative loads, so we no longer need to map
>> additional shadow memory.
>
> I see that patch 1 fixed up the Linux helpers for outline
> instrumentation.
>
> Just to check, is it also true that the inline instrumentation never
> performs unaligned accesses to the shadow memory?

Inline instrumentation generally accesses only a single byte.

> If so, this looks good to me; it also avoids a potential fencepost issue
> when memory exists right at the end of the linear map. Assuming that
> holds:
>
> Acked-by: Mark Rutland <mark.rutland@arm.com>
>
> Thanks,
> Mark.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:45     ` Dmitry Vyukov
@ 2017-06-01 16:52       ` Mark Rutland
  2017-06-01 16:59         ` Andrey Ryabinin
  0 siblings, 1 reply; 12+ messages in thread
From: Mark Rutland @ 2017-06-01 16:52 UTC (permalink / raw)
  To: Dmitry Vyukov
  Cc: Andrey Ryabinin, Andrew Morton, Catalin Marinas, Will Deacon,
	LKML, kasan-dev, linux-mm, Alexander Potapenko, linux-arm-kernel

On Thu, Jun 01, 2017 at 06:45:32PM +0200, Dmitry Vyukov wrote:
> On Thu, Jun 1, 2017 at 6:34 PM, Mark Rutland <mark.rutland@arm.com> wrote:
> > On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
> >> We used to read several bytes of the shadow memory in advance.
> >> Therefore additional shadow memory mapped to prevent crash if
> >> speculative load would happen near the end of the mapped shadow memory.
> >>
> >> Now we don't have such speculative loads, so we no longer need to map
> >> additional shadow memory.
> >
> > I see that patch 1 fixed up the Linux helpers for outline
> > instrumentation.
> >
> > Just to check, is it also true that the inline instrumentation never
> > performs unaligned accesses to the shadow memory?
> 
> Inline instrumentation generally accesses only a single byte.

Sorry to be a little pedantic, but does that mean we'll never access the
additional shadow, or does that mean it's very unlikely that we will?

I'm guessing/hoping it's the former!

Thanks,
Mark.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:52       ` Mark Rutland
@ 2017-06-01 16:59         ` Andrey Ryabinin
  2017-06-01 17:00           ` Andrey Ryabinin
  0 siblings, 1 reply; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 16:59 UTC (permalink / raw)
  To: Mark Rutland, Dmitry Vyukov
  Cc: Andrew Morton, Catalin Marinas, Will Deacon, LKML, kasan-dev,
	linux-mm, Alexander Potapenko, linux-arm-kernel



On 06/01/2017 07:52 PM, Mark Rutland wrote:
> On Thu, Jun 01, 2017 at 06:45:32PM +0200, Dmitry Vyukov wrote:
>> On Thu, Jun 1, 2017 at 6:34 PM, Mark Rutland <mark.rutland@arm.com> wrote:
>>> On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
>>>> We used to read several bytes of the shadow memory in advance.
>>>> Therefore additional shadow memory mapped to prevent crash if
>>>> speculative load would happen near the end of the mapped shadow memory.
>>>>
>>>> Now we don't have such speculative loads, so we no longer need to map
>>>> additional shadow memory.
>>>
>>> I see that patch 1 fixed up the Linux helpers for outline
>>> instrumentation.
>>>
>>> Just to check, is it also true that the inline instrumentation never
>>> performs unaligned accesses to the shadow memory?
>>

Correct, inline instrumentation assumes that all accesses are properly aligned, as
required by the C standard. I knew that the kernel violates this rule in many places,
which is why I decided to add checks for unaligned accesses in the outline case.
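
Roughly, and only as a sketch rather than the exact code GCC emits, the check
inserted by inline instrumentation for an aligned access of size <= 8 looks
like this (shadow_of() and report() stand in for the real shadow lookup and
report call):

#include <stdio.h>

static signed char shadow_of(unsigned long addr)
{
	/* pretend the granule at 0x1000 has only its first 4 bytes valid */
	return (addr >> 3) == (0x1000 >> 3) ? 4 : 0;
}

static void report(unsigned long addr, unsigned long size)
{
	printf("bad access: %#lx size %lu\n", addr, size);
}

static void check_access(unsigned long addr, unsigned long size)
{
	signed char s = shadow_of(addr);	/* single shadow byte read */

	/* zero shadow byte: whole granule valid, nothing else to check */
	if (s && (signed char)((addr & 7) + size - 1) >= s)
		report(addr, size);
}

int main(void)
{
	check_access(0x1000, 4);	/* ok: bytes 0..3 valid */
	check_access(0x1000, 8);	/* reported: bytes 4..7 poisoned */
	return 0;
}

Note that only the one shadow byte for the access's own granule is ever read,
which is what makes the extra mapped shadow page unnecessary for aligned
accesses.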


>> Inline instrumentation generally accesses only a single byte.
> 
> Sorry to be a little pedantic, but does that mean we'll never access the
> additional shadow, or does that mean it's very unlikely that we will?
> 
> I'm guessing/hoping it's the former!
> 

Outline will never access additional shadow byte: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#unaligned-accesses

> Thanks,
> Mark.
> 


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 16:59         ` Andrey Ryabinin
@ 2017-06-01 17:00           ` Andrey Ryabinin
  2017-06-01 17:05             ` Dmitry Vyukov
  0 siblings, 1 reply; 12+ messages in thread
From: Andrey Ryabinin @ 2017-06-01 17:00 UTC (permalink / raw)
  To: Mark Rutland, Dmitry Vyukov
  Cc: Andrew Morton, Catalin Marinas, Will Deacon, LKML, kasan-dev,
	linux-mm, Alexander Potapenko, linux-arm-kernel



On 06/01/2017 07:59 PM, Andrey Ryabinin wrote:
> 
> 
> On 06/01/2017 07:52 PM, Mark Rutland wrote:
>> On Thu, Jun 01, 2017 at 06:45:32PM +0200, Dmitry Vyukov wrote:
>>> On Thu, Jun 1, 2017 at 6:34 PM, Mark Rutland <mark.rutland@arm.com> wrote:
>>>> On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
>>>>> We used to read several bytes of the shadow memory in advance.
>>>>> Therefore additional shadow memory mapped to prevent crash if
>>>>> speculative load would happen near the end of the mapped shadow memory.
>>>>>
>>>>> Now we don't have such speculative loads, so we no longer need to map
>>>>> additional shadow memory.
>>>>
>>>> I see that patch 1 fixed up the Linux helpers for outline
>>>> instrumentation.
>>>>
>>>> Just to check, is it also true that the inline instrumentation never
>>>> performs unaligned accesses to the shadow memory?
>>>
> 
> Correct, inline instrumentation assumes that all accesses are properly aligned as it
> required by C standard. I knew that the kernel violates this rule in many places,
> therefore I decided to add checks for unaligned accesses in outline case.
> 
> 
>>> Inline instrumentation generally accesses only a single byte.
>>
>> Sorry to be a little pedantic, but does that mean we'll never access the
>> additional shadow, or does that mean it's very unlikely that we will?
>>
>> I'm guessing/hoping it's the former!
>>
> 
> Outline will never access additional shadow byte: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#unaligned-accesses

s/Outline/inline  of course.

> 
>> Thanks,
>> Mark.
>>


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 17:00           ` Andrey Ryabinin
@ 2017-06-01 17:05             ` Dmitry Vyukov
  2017-06-01 17:38               ` Dmitry Vyukov
  0 siblings, 1 reply; 12+ messages in thread
From: Dmitry Vyukov @ 2017-06-01 17:05 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Mark Rutland, Andrew Morton, Catalin Marinas, Will Deacon, LKML,
	kasan-dev, linux-mm, Alexander Potapenko, linux-arm-kernel,
	Yuri Gribov

On Thu, Jun 1, 2017 at 7:00 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
>
>
> On 06/01/2017 07:59 PM, Andrey Ryabinin wrote:
>>
>>
>> On 06/01/2017 07:52 PM, Mark Rutland wrote:
>>> On Thu, Jun 01, 2017 at 06:45:32PM +0200, Dmitry Vyukov wrote:
>>>> On Thu, Jun 1, 2017 at 6:34 PM, Mark Rutland <mark.rutland@arm.com> wrote:
>>>>> On Thu, Jun 01, 2017 at 07:23:37PM +0300, Andrey Ryabinin wrote:
>>>>>> We used to read several bytes of the shadow memory in advance.
>>>>>> Therefore additional shadow memory mapped to prevent crash if
>>>>>> speculative load would happen near the end of the mapped shadow memory.
>>>>>>
>>>>>> Now we don't have such speculative loads, so we no longer need to map
>>>>>> additional shadow memory.
>>>>>
>>>>> I see that patch 1 fixed up the Linux helpers for outline
>>>>> instrumentation.
>>>>>
>>>>> Just to check, is it also true that the inline instrumentation never
>>>>> performs unaligned accesses to the shadow memory?
>>>>
>>
>> Correct, inline instrumentation assumes that all accesses are properly aligned as it
>> required by C standard. I knew that the kernel violates this rule in many places,
>> therefore I decided to add checks for unaligned accesses in outline case.
>>
>>
>>>> Inline instrumentation generally accesses only a single byte.
>>>
>>> Sorry to be a little pedantic, but does that mean we'll never access the
>>> additional shadow, or does that mean it's very unlikely that we will?
>>>
>>> I'm guessing/hoping it's the former!
>>>
>>
>> Outline will never access additional shadow byte: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#unaligned-accesses
>
> s/Outline/inline  of course.


I suspect that actual implementations have diverged from that
description. I'm trying to follow asan_expand_check_ifn in:
https://gcc.gnu.org/viewcvs/gcc/trunk/gcc/asan.c?revision=246703&view=markup
but it's not trivial.

+Yuri, maybe you know off the top of your head whether asan instrumentation
in gcc ever accesses an off-by-one shadow byte (i.e. 1 byte after the actual
object end)?


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] arm64/kasan: don't allocate extra shadow memory
  2017-06-01 17:05             ` Dmitry Vyukov
@ 2017-06-01 17:38               ` Dmitry Vyukov
  0 siblings, 0 replies; 12+ messages in thread
From: Dmitry Vyukov @ 2017-06-01 17:38 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Mark Rutland, Andrew Morton, Catalin Marinas, Will Deacon, LKML,
	kasan-dev, linux-mm, Alexander Potapenko, linux-arm-kernel,
	Yuri Gribov

On Thu, Jun 1, 2017 at 7:05 PM, Dmitry Vyukov <dvyukov@google.com> wrote:
>>>>>>> We used to read several bytes of the shadow memory in advance.
>>>>>>> Therefore additional shadow memory mapped to prevent crash if
>>>>>>> speculative load would happen near the end of the mapped shadow memory.
>>>>>>>
>>>>>>> Now we don't have such speculative loads, so we no longer need to map
>>>>>>> additional shadow memory.
>>>>>>
>>>>>> I see that patch 1 fixed up the Linux helpers for outline
>>>>>> instrumentation.
>>>>>>
>>>>>> Just to check, is it also true that the inline instrumentation never
>>>>>> performs unaligned accesses to the shadow memory?
>>>>>
>>>
>>> Correct, inline instrumentation assumes that all accesses are properly aligned as it
>>> required by C standard. I knew that the kernel violates this rule in many places,
>>> therefore I decided to add checks for unaligned accesses in outline case.
>>>
>>>
>>>>> Inline instrumentation generally accesses only a single byte.
>>>>
>>>> Sorry to be a little pedantic, but does that mean we'll never access the
>>>> additional shadow, or does that mean it's very unlikely that we will?
>>>>
>>>> I'm guessing/hoping it's the former!
>>>>
>>>
>>> Outline will never access additional shadow byte: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#unaligned-accesses
>>
>> s/Outline/inline  of course.
>
>
> I suspect that actual implementations have diverged from that
> description. Trying to follow asan_expand_check_ifn in:
> https://gcc.gnu.org/viewcvs/gcc/trunk/gcc/asan.c?revision=246703&view=markup
> but it's not trivial.
>
> +Yuri, maybe you know off the top of your head if asan instrumentation
> in gcc ever accesses off-by-one shadow byte (i.e. 1 byte after actual
> object end)?

Thinking about this more: there is at least one case in user-space asan
where an off-by-one shadow access would lead to similar crashes: for
mmap-ed regions we don't have redzones and map shadow only for the
region itself, so any off-by-one access would crash. So I guess we are
safe here, or at least any such crash would be a gcc bug.
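
A tiny sketch of the arithmetic behind that observation (the base address is
made up; 0x7fff8000 is the usual user-space ASan shadow offset on x86_64): the
shadow is mapped for [shadow(base), shadow(base + len)) only, so the shadow
byte one past that range is not guaranteed to be mapped, and reading it would
crash.

#include <stdio.h>

#define ASAN_SHADOW_SCALE	3
#define ASAN_SHADOW_OFFSET	0x7fff8000UL

static unsigned long shadow(unsigned long addr)
{
	return (addr >> ASAN_SHADOW_SCALE) + ASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long base = 0x7f0000000000UL, len = 4UL << 20;	/* example mmap */

	printf("mapped shadow:    [%#lx, %#lx)\n",
	       shadow(base), shadow(base + len));
	printf("one past the end:  %#lx\n", shadow(base + len));
	return 0;
}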


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/4] mm/kasan: get rid of speculative shadow checks
  2017-06-01 16:23 [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Andrey Ryabinin
                   ` (2 preceding siblings ...)
  2017-06-01 16:23 ` [PATCH 4/4] mm/kasan: Add support for memory hotplug Andrey Ryabinin
@ 2017-06-01 17:45 ` Dmitry Vyukov
  3 siblings, 0 replies; 12+ messages in thread
From: Dmitry Vyukov @ 2017-06-01 17:45 UTC (permalink / raw)
  To: Andrey Ryabinin
  Cc: Andrew Morton, Alexander Potapenko, kasan-dev, linux-mm, LKML

On Thu, Jun 1, 2017 at 6:23 PM, Andrey Ryabinin <aryabinin@virtuozzo.com> wrote:
> For some unaligned memory accesses we have to check additional
> byte of the shadow memory. Currently we load that byte speculatively
> to have only single load + branch on the optimistic fast path.
>
> However, this approach have some downsides:
>  - It's unaligned access, so this prevents porting KASAN on architectures
>     which doesn't support unaligned accesses.
>  - We have to map additional shadow page to prevent crash if
>     speculative load happens near the end of the mapped memory.
>     This would significantly complicate upcoming memory hotplug support.
>
> I wasn't able to notice any performance degradation with this patch.
> So these speculative loads is just a pain with no gain, let's remove
> them.
>
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>


Acked-by: Dmitry Vyukov <dvyukov@google.com>



^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2017-06-01 17:46 UTC | newest]

Thread overview: 12+ messages
2017-06-01 16:23 [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Andrey Ryabinin
2017-06-01 16:23 ` [PATCH 2/4] x86/kasan: don't allocate extra shadow memory Andrey Ryabinin
2017-06-01 16:23 ` [PATCH 3/4] arm64/kasan: " Andrey Ryabinin
2017-06-01 16:34   ` Mark Rutland
2017-06-01 16:45     ` Dmitry Vyukov
2017-06-01 16:52       ` Mark Rutland
2017-06-01 16:59         ` Andrey Ryabinin
2017-06-01 17:00           ` Andrey Ryabinin
2017-06-01 17:05             ` Dmitry Vyukov
2017-06-01 17:38               ` Dmitry Vyukov
2017-06-01 16:23 ` [PATCH 4/4] mm/kasan: Add support for memory hotplug Andrey Ryabinin
2017-06-01 17:45 ` [PATCH 1/4] mm/kasan: get rid of speculative shadow checks Dmitry Vyukov
