* [PATCH] mm/x86: AMD Bulldozer ASLR fix
@ 2015-03-24 18:00 Hector Marco-Gisbert
  2015-03-24 19:15 ` Borislav Petkov
  0 siblings, 1 reply; 15+ messages in thread
From: Hector Marco-Gisbert @ 2015-03-24 18:00 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin, x86,
	Alexander Viro, Jan-Simon, linux-fsdevel, Hector Marco-Gisbert,
	Ismael Ripoll

A bug in the Linux ASLR implementation which affects some AMD processors has
been found. The issue affects all Linux processes, even those that are not
using shared libraries (i.e. statically compiled).
 
The problem appears because some mmapped objects (VDSO, libraries, etc.)
are poorly randomized in an attempt to avoid cache aliasing penalties for
AMD Bulldozer (Family 15h) processors.

On affected systems, the entropy of mmapped files is reduced by a factor of
eight.

The following output is from a run on an AMD Opteron 62xx class CPU under
x86_64 Linux 4.0.0:

for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6


As shown in the previous output, bits 12, 13 and 14 are always 0: the address
always ends in 0x8000 or 0x0000.
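
To double-check which bits actually vary, a larger sample of those start
addresses can be fed through a small helper like the one below (an
illustrative sketch, not part of the patch; the file name and invocation
are made up):

  /* varybits.c -- report which address bits ever change.
   *
   * Build: gcc -O2 -o varybits varybits.c
   * Use:   for i in `seq 1 1000`; do cat /proc/self/maps; done | \
   *            grep "r-xp.*libc" | cut -d- -f1 | ./varybits
   */
  #include <stdio.h>

  int main(void)
  {
          unsigned long addr, seen_set = 0, seen_clear = 0;

          while (scanf("%lx", &addr) == 1) {
                  seen_set   |= addr;
                  seen_clear |= ~addr;
          }
          /* A bit carries entropy only if it was observed both set and
           * clear; on an affected system, bits 12-14 drop out here. */
          printf("varying bits: %#lx\n", seen_set & seen_clear);
          return 0;
  }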

The bug is caused by a hack to improve performance by avoiding cache aliasing
penalties on Family 15h AMD Bulldozer processors (commit dfb09f9b).

32-bit systems are especially sensitive to this issue because the entropy for
libraries is reduced from 2^8 to 2^5, which means that libraries can be loaded
at only 32 different places.

This patch randomizes the three affected bits once per boot, rather than
setting them to zero. Since all the shared pages have the same value in bits
[12..14], there are no cache aliasing problems (which are supposed to be the
cause of the performance loss). On the other hand, since the value is not
known to a potential remote attacker, ASLR preserves its effectiveness.
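
For intuition on why keeping bits [12..14] equal avoids the aliasing:
assuming the shared I$ geometry described in the AMD white paper cited
later in this thread (64 KB, 2-way, 64-byte lines), the set index uses
virtual address bits [6..14], and [12..14] are exactly the index bits
above the 4 KiB page offset. A rough sketch:

  #include <stdio.h>

  /* Assumed F15h shared I$ geometry: 64 KB, 2-way, 64-byte lines,
   * i.e. 512 sets indexed by virtual address bits [6..14]. */
  #define NSETS 512

  static unsigned long iset(unsigned long vaddr)
  {
          return (vaddr >> 6) % NSETS;
  }

  int main(void)
  {
          /* The same page mapped at two addresses differing only in
           * bit 13 lands in different sets: virtual aliasing. */
          printf("set %lu vs set %lu\n",
                 iset(0xb7588000UL), iset(0xb758a000UL));
          return 0;
  }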

More details at:

http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html


Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
---
 arch/x86/include/asm/amd_15h.h |  6 ++++++
 arch/x86/kernel/cpu/amd.c      |  5 +++++
 arch/x86/kernel/sys_x86_64.c   | 16 +++++++++++++---
 3 files changed, 24 insertions(+), 3 deletions(-)
 create mode 100644 arch/x86/include/asm/amd_15h.h

diff --git a/arch/x86/include/asm/amd_15h.h b/arch/x86/include/asm/amd_15h.h
new file mode 100644
index 0000000..3e6fb6e
--- /dev/null
+++ b/arch/x86/include/asm/amd_15h.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_AMD_15H_H
+#define _ASM_X86_AMD_15H_H
+
+extern unsigned long rnd_bulldozer_bits __read_mostly;
+
+#endif /* _ASM_X86_AMD_15H_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df9..a693d54 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -18,6 +19,8 @@
 
 #include "cpu.h"
 
+unsigned long rnd_bulldozer_bits = 0;
+
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
 	u32 gprs[8] = { 0 };
@@ -488,6 +491,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+		/* A random value per boot for bits 12,13 and 14 */
+		rnd_bulldozer_bits = get_random_int() & va_align.mask;
 	}
 }
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e2..5b8ad01 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -18,6 +18,7 @@
 
 #include <asm/ia32.h>
 #include <asm/syscalls.h>
+#include <asm/amd_15h.h>
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -34,10 +35,16 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+static unsigned long get_bulldozer_bits(void){
+
+	return rnd_bulldozer_bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_bulldozer_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -137,7 +144,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.high_limit = end;
 	info.align_mask = filp ? get_align_mask() : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
-	return vm_unmapped_area(&info);
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return filp ? (addr|get_bulldozer_bits()) : addr;
+	return addr;
 }
 
 unsigned long
@@ -178,7 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
-		return addr;
+		return filp ? (addr|get_bulldozer_bits()) : addr;
 	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
-- 
1.9.1



* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-24 18:00 [PATCH] mm/x86: AMD Bulldozer ASLR fix Hector Marco-Gisbert
@ 2015-03-24 19:15 ` Borislav Petkov
  2015-03-25 18:29   ` Hector Marco
  2015-03-25 18:36   ` Hector Marco-Gisbert
  0 siblings, 2 replies; 15+ messages in thread
From: Borislav Petkov @ 2015-03-24 19:15 UTC (permalink / raw)
  To: Hector Marco-Gisbert
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Ismael Ripoll

On Tue, Mar 24, 2015 at 07:00:48PM +0100, Hector Marco-Gisbert wrote:
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index 15c5df9..a693d54 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -5,6 +5,7 @@
>  
>  #include <linux/io.h>
>  #include <linux/sched.h>
> +#include <linux/random.h>
>  #include <asm/processor.h>
>  #include <asm/apic.h>
>  #include <asm/cpu.h>
> @@ -18,6 +19,8 @@
>  
>  #include "cpu.h"
>  
> +unsigned long rnd_bulldozer_bits = 0;
> +
>  static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
>  {
>  	u32 gprs[8] = { 0 };
> @@ -488,6 +491,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
>  
>  		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
>  		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
> +		/* A random value per boot for bits 12,13 and 14 */
> +		rnd_bulldozer_bits = get_random_int() & va_align.mask;

Hmm, this should be done differently:

va_align should have a ->bits member which gets ORed into the hole
made by va_align.mask...

>  	}
>  }
>  
> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
> index 30277e2..5b8ad01 100644
> --- a/arch/x86/kernel/sys_x86_64.c
> +++ b/arch/x86/kernel/sys_x86_64.c
> @@ -18,6 +18,7 @@
>  
>  #include <asm/ia32.h>
>  #include <asm/syscalls.h>
> +#include <asm/amd_15h.h>
>  
>  /*
>   * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
> @@ -34,10 +35,16 @@ static unsigned long get_align_mask(void)
>  	return va_align.mask;
>  }
>  
> +static unsigned long get_bulldozer_bits(void){
> +
> +	return rnd_bulldozer_bits & get_align_mask();
> +}
> +
>  unsigned long align_vdso_addr(unsigned long addr)
>  {
>  	unsigned long align_mask = get_align_mask();
> -	return (addr + align_mask) & ~align_mask;
> +	addr = (addr + align_mask) & ~align_mask;
> +	return addr | get_bulldozer_bits();
>  }
>  
>  static int __init control_va_addr_alignment(char *str)
> @@ -137,7 +144,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>  	info.high_limit = end;
>  	info.align_mask = filp ? get_align_mask() : 0;

	info.align_bits = filp ? get_align_bits() : 0;

>  	info.align_offset = pgoff << PAGE_SHIFT;
> -	return vm_unmapped_area(&info);
> +	addr = vm_unmapped_area(&info);
> +	if (!(addr & ~PAGE_MASK))
> +		return filp ? (addr|get_bulldozer_bits()) : addr;
> +	return addr;
>  }
>  
>  unsigned long
> @@ -178,7 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
>  	info.align_offset = pgoff << PAGE_SHIFT;
>  	addr = vm_unmapped_area(&info);
>  	if (!(addr & ~PAGE_MASK))
> -		return addr;
> +		return filp ? (addr|get_bulldozer_bits()) : addr;

Ditto.

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-24 19:15 ` Borislav Petkov
@ 2015-03-25 18:29   ` Hector Marco
  2015-03-25 18:36   ` Hector Marco-Gisbert
  1 sibling, 0 replies; 15+ messages in thread
From: Hector Marco @ 2015-03-25 18:29 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Ismael Ripoll,
	Kees Cook



On 24/03/15 at 20:15, Borislav Petkov wrote:
> On Tue, Mar 24, 2015 at 07:00:48PM +0100, Hector Marco-Gisbert wrote:
>> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
>> index 15c5df9..a693d54 100644
>> --- a/arch/x86/kernel/cpu/amd.c
>> +++ b/arch/x86/kernel/cpu/amd.c
>> @@ -5,6 +5,7 @@
>>
>>   #include <linux/io.h>
>>   #include <linux/sched.h>
>> +#include <linux/random.h>
>>   #include <asm/processor.h>
>>   #include <asm/apic.h>
>>   #include <asm/cpu.h>
>> @@ -18,6 +19,8 @@
>>
>>   #include "cpu.h"
>>
>> +unsigned long rnd_bulldozer_bits = 0;
>> +
>>   static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
>>   {
>>   	u32 gprs[8] = { 0 };
>> @@ -488,6 +491,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
>>
>>   		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
>>   		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
>> +		/* A random value per boot for bits 12,13 and 14 */
>> +		rnd_bulldozer_bits = get_random_int() & va_align.mask;
>
> Hmm, this should be done differently:
>
> va_align should have a ->bits member which gets ORed into the hole
> made by va_align.mask...
>

Yes, it looks better using va_align.

>>   	}
>>   }
>>
>> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
>> index 30277e2..5b8ad01 100644
>> --- a/arch/x86/kernel/sys_x86_64.c
>> +++ b/arch/x86/kernel/sys_x86_64.c
>> @@ -18,6 +18,7 @@
>>
>>   #include <asm/ia32.h>
>>   #include <asm/syscalls.h>
>> +#include <asm/amd_15h.h>
>>
>>   /*
>>    * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
>> @@ -34,10 +35,16 @@ static unsigned long get_align_mask(void)
>>   	return va_align.mask;
>>   }
>>
>> +static unsigned long get_bulldozer_bits(void){
>> +
>> +	return rnd_bulldozer_bits & get_align_mask();
>> +}
>> +
>>   unsigned long align_vdso_addr(unsigned long addr)
>>   {
>>   	unsigned long align_mask = get_align_mask();
>> -	return (addr + align_mask) & ~align_mask;
>> +	addr = (addr + align_mask) & ~align_mask;
>> +	return addr | get_bulldozer_bits();
>>   }
>>
>>   static int __init control_va_addr_alignment(char *str)
>> @@ -137,7 +144,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>>   	info.high_limit = end;
>>   	info.align_mask = filp ? get_align_mask() : 0;
>
> 	info.align_bits = filp ? get_align_bits() : 0;
>

I see your point. The drawback of adding a new field (align_bits) to the info 
struct is that all architectures would need to initialize info.align_bits. In 
addition, the generic functions unmapped_area()/unmapped_area_topdown() in 
mm/mmap.c would need to be modified to set bits [12..14] using the new 
info.align_bits field.

A possible alternative which does not add a new field is to use the 
"align_offset" variable: by adding the "get_align_bits()" value to 
"align_offset", bits [12..14] are randomized per boot.

Patch coming.
Hector.

>>   	info.align_offset = pgoff << PAGE_SHIFT;
>> -	return vm_unmapped_area(&info);
>> +	addr = vm_unmapped_area(&info);
>> +	if (!(addr & ~PAGE_MASK))
>> +		return filp ? (addr|get_bulldozer_bits()) : addr;
>> +	return addr;
>>   }
>>
>>   unsigned long
>> @@ -178,7 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
>>   	info.align_offset = pgoff << PAGE_SHIFT;
>>   	addr = vm_unmapped_area(&info);
>>   	if (!(addr & ~PAGE_MASK))
>> -		return addr;
>> +		return filp ? (addr|get_bulldozer_bits()) : addr;
>
> Ditto.
>


* [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-24 19:15 ` Borislav Petkov
  2015-03-25 18:29   ` Hector Marco
@ 2015-03-25 18:36   ` Hector Marco-Gisbert
  2015-03-26 19:08     ` Borislav Petkov
  1 sibling, 1 reply; 15+ messages in thread
From: Hector Marco-Gisbert @ 2015-03-25 18:36 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Kees Cook,
	Hector Marco-Gisbert, Ismael Ripoll

A bug in the Linux ASLR implementation which affects some AMD processors has
been found. The issue affects all Linux processes, even those that are not
using shared libraries (i.e. statically compiled).

The problem appears because some mmapped objects (VDSO, libraries, etc.)
are poorly randomized in an attempt to avoid cache aliasing penalties for
AMD Bulldozer (Family 15h) processors.

On affected systems, the entropy of mmapped files is reduced by a factor of
eight.

The following output is from a run on an AMD Opteron 62xx class CPU under
x86_64 Linux 4.0.0:

for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6

As shown in the previous output, bits 12, 13 and 14 are always 0: the address
always ends in 0x8000 or 0x0000.

The bug is caused by a hack to improve performance by avoiding cache aliasing
penalties on Family 15h AMD Bulldozer processors (commit dfb09f9b).

32-bit systems are especially sensitive to this issue because the entropy for
libraries is reduced from 2^8 to 2^5, which means that libraries can be loaded
at only 32 different places.

This patch randomizes the three affected bits once per boot, rather than
setting them to zero. Since all the shared pages have the same value in bits
[12..14], there are no cache aliasing problems (which are supposed to be the
cause of the performance loss). On the other hand, since the value is not
known to a potential remote attacker, ASLR preserves its effectiveness.

More details at:

http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html


Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
---
 arch/x86/include/asm/elf.h   |  1 +
 arch/x86/kernel/cpu/amd.c    |  3 +++
 arch/x86/kernel/sys_x86_64.c | 20 +++++++++++++++++---
 3 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ca3347a..bd292ce 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -365,6 +365,7 @@ enum align_flags {
 struct va_alignment {
 	int flags;
 	unsigned long mask;
+	unsigned long bits;
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df9..45a41be 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -488,6 +489,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+		/* A random value per boot for bits 12,13 and 14 */
+		va_align.bits = get_random_int() & va_align.mask;
 	}
 }
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e2..d38905d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -34,10 +34,16 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+static unsigned long get_align_bits(void){
+
+	return va_align.bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_align_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -135,8 +141,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = begin;
 	info.high_limit = end;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if(filp){
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	return vm_unmapped_area(&info);
 }
 
@@ -174,8 +184,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if(filp){
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
-- 
1.9.1



* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-25 18:36   ` Hector Marco-Gisbert
@ 2015-03-26 19:08     ` Borislav Petkov
  2015-03-27 11:38       ` Hector Marco-Gisbert
  0 siblings, 1 reply; 15+ messages in thread
From: Borislav Petkov @ 2015-03-26 19:08 UTC (permalink / raw)
  To: Hector Marco-Gisbert
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Kees Cook,
	Ismael Ripoll

On Wed, Mar 25, 2015 at 07:36:17PM +0100, Hector Marco-Gisbert wrote:
> A bug in the Linux ASLR implementation which affects some AMD processors
> has been found. The issue affects all Linux processes, even those that are
> not using shared libraries (i.e. statically compiled).
> 
> The problem appears because some mmapped objects (VDSO, libraries, etc.)
> are poorly randomized in an attempt to avoid cache aliasing penalties for
> AMD Bulldozer (Family 15h) processors.
> 
> On affected systems, the entropy of mmapped files is reduced by a factor
> of eight.
> 
> The following output is from a run on an AMD Opteron 62xx class CPU under
> x86_64 Linux 4.0.0:
> 
> for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
> b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
> b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
> b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
> b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
> b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
> 
> As shown in the previous output, bits 12, 13 and 14 are always 0: the
> address always ends in 0x8000 or 0x0000.
> 
> The bug is caused by a hack to improve performance by avoiding cache
> aliasing penalties on Family 15h AMD Bulldozer processors (commit dfb09f9b).
> 
> 32-bit systems are especially sensitive to this issue because the entropy
> for libraries is reduced from 2^8 to 2^5, which means that libraries can be
> loaded at only 32 different places.
> 
> This patch randomizes the three affected bits once per boot, rather than
> setting them to zero. Since all the shared pages have the same value in bits
> [12..14], there are no cache aliasing problems (which are supposed to be the
> cause of the performance loss). On the other hand, since the value is not
> known to a potential remote attacker, ASLR preserves its effectiveness.
> 
> More details at:
> 
> http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html
> 
> 
> Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
> Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
> ---
>  arch/x86/include/asm/elf.h   |  1 +
>  arch/x86/kernel/cpu/amd.c    |  3 +++
>  arch/x86/kernel/sys_x86_64.c | 20 +++++++++++++++++---
>  3 files changed, 21 insertions(+), 3 deletions(-)

Please run your patch through checkpatch to address minor coding style
issues.

> diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
> index ca3347a..bd292ce 100644
> --- a/arch/x86/include/asm/elf.h
> +++ b/arch/x86/include/asm/elf.h
> @@ -365,6 +365,7 @@ enum align_flags {
>  struct va_alignment {
>  	int flags;
>  	unsigned long mask;
> +	unsigned long bits;
>  } ____cacheline_aligned;
>  
>  extern struct va_alignment va_align;
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index 15c5df9..45a41be 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -5,6 +5,7 @@
>  
>  #include <linux/io.h>
>  #include <linux/sched.h>
> +#include <linux/random.h>
>  #include <asm/processor.h>
>  #include <asm/apic.h>
>  #include <asm/cpu.h>
> @@ -488,6 +489,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
>  
>  		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
>  		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

Newline here.

> +		/* A random value per boot for bits 12,13 and 14 */

		"A random value for bit slice [12:upper_bit)"

the interval end is open.

> +		va_align.bits = get_random_int() & va_align.mask;
>  	}
>  }
>  
> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
> index 30277e2..d38905d 100644
> --- a/arch/x86/kernel/sys_x86_64.c
> +++ b/arch/x86/kernel/sys_x86_64.c
> @@ -34,10 +34,16 @@ static unsigned long get_align_mask(void)
>  	return va_align.mask;
>  }

Please add a short comment above this function explaining why we're
doing this and thus what the value that goes into ->align_offset below
represents.

> +static unsigned long get_align_bits(void){
> +
> +	return va_align.bits & get_align_mask();
> +}
> +

...

Thanks.

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--


* [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-26 19:08     ` Borislav Petkov
@ 2015-03-27 11:38       ` Hector Marco-Gisbert
  2015-03-27 12:14         ` Ingo Molnar
                           ` (2 more replies)
  0 siblings, 3 replies; 15+ messages in thread
From: Hector Marco-Gisbert @ 2015-03-27 11:38 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Kees Cook,
	Hector Marco-Gisbert, Ismael Ripoll

A bug in the Linux ASLR implementation which affects some AMD processors has
been found. The issue affects all Linux processes, even those that are not
using shared libraries (i.e. statically compiled).

The problem appears because some mmapped objects (VDSO, libraries, etc.)
are poorly randomized in an attempt to avoid cache aliasing penalties for
AMD Bulldozer (Family 15h) processors.

On affected systems, the entropy of mmapped files is reduced by a factor of
eight.

The following output is from a run on an AMD Opteron 62xx class CPU under
x86_64 Linux 4.0.0:

for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6

As shown in the previous output, bits 12, 13 and 14 are always 0: the address
always ends in 0x8000 or 0x0000.

The bug is caused by a hack to improve performance by avoiding cache aliasing
penalties on Family 15h AMD Bulldozer processors (commit dfb09f9b).

32-bit systems are especially sensitive to this issue because the entropy for
libraries is reduced from 2^8 to 2^5, which means that libraries can be loaded
at only 32 different places.

This patch randomizes the three affected bits once per boot, rather than
setting them to zero. Since all the shared pages have the same value in bits
[12..14], there are no cache aliasing problems (which are supposed to be the
cause of the performance loss). On the other hand, since the value is not
known to a potential remote attacker, ASLR preserves its effectiveness.

More details at:

http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html


Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
---
 arch/x86/include/asm/elf.h   |  1 +
 arch/x86/kernel/cpu/amd.c    |  4 ++++
 arch/x86/kernel/sys_x86_64.c | 29 ++++++++++++++++++++++++++---
 3 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ca3347a..bd292ce 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -365,6 +365,7 @@ enum align_flags {
 struct va_alignment {
 	int flags;
 	unsigned long mask;
+	unsigned long bits;
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df9..b4d0ddd 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+
+		/* A random value per boot for bit slice [12:upper_bit) */
+		va_align.bits = get_random_int() & va_align.mask;
 	}
 }
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e2..5b3e66e 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -34,10 +34,25 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+/*
+ * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
+ * va_align.mask, [12:upper_bit), are set to a random value instead of zeroing
+ * them. This random value is computed once per boot. This form of ASLR is known
+ * as "per-boot ASLR".
+ *
+ * To achieve this, the random value is added to the info.align_offset value
+ * before calling vm_unmapped_area() or ORed directly to the address.
+ */
+static unsigned long get_align_bits(void)
+{
+	return va_align.bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_align_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -135,8 +150,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = begin;
 	info.high_limit = end;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	return vm_unmapped_area(&info);
 }
 
@@ -174,8 +193,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
-- 
1.9.1



* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 11:38       ` Hector Marco-Gisbert
@ 2015-03-27 12:14         ` Ingo Molnar
  2015-03-27 12:35           ` Borislav Petkov
  2015-03-27 14:44         ` Borislav Petkov
  2015-03-31 12:37         ` [tip:x86/mm] x86/mm: Improve AMD Bulldozer ASLR workaround tip-bot for Hector Marco-Gisbert
  2 siblings, 1 reply; 15+ messages in thread
From: Ingo Molnar @ 2015-03-27 12:14 UTC (permalink / raw)
  To: Hector Marco-Gisbert
  Cc: Borislav Petkov, linux-kernel, akpm, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Kees Cook, Ismael Ripoll


* Hector Marco-Gisbert <hecmargi@upv.es> wrote:

> A bug in the Linux ASLR implementation which affects some AMD 
> processors has been found. The issue affects all Linux processes, even 
> those that are not using shared libraries (i.e. statically compiled).

It's not a bug, it's a feature: to work around a Bulldozer cache 
aliasing performance problem we have to keep bits 12,13,14 equal for 
all mappings in the system.

Your patch improves upon that fix by per-boot randomizing the 
constant portion of the randomized range.

Btw., does anyone know how relevant the performance fix is these days? 
A simpler improvement would be to remove the workaround altogether and 
recover proper randomization of bits 12,13,14.

Thanks,

	Ingo


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 12:14         ` Ingo Molnar
@ 2015-03-27 12:35           ` Borislav Petkov
  0 siblings, 0 replies; 15+ messages in thread
From: Borislav Petkov @ 2015-03-27 12:35 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Hector Marco-Gisbert, linux-kernel, akpm, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Kees Cook, Ismael Ripoll

On Fri, Mar 27, 2015 at 01:14:48PM +0100, Ingo Molnar wrote:
> It's not a bug, it's a feature: to work around a Bulldozer cache
> aliasing performance problem we have to keep bits 12,13,14 equal for
> all mappings in the system.
>
> Your patch improves upon that fix: by per-boot randomizing the
> constant portion of the randomized range.

I have this one on the TODO list for today, I'll take care of those
formulations when applying.

> Btw., does anyone know how relevant the performance fix is these days?
> A simpler improvement would be to remove the workaround altogether and
> recover proper randomization of bits 12,13,14.

Nothing has changed in that respect on F15h, i.e. the Bulldozer uarch. I
think we still need this workaround in place.

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 11:38       ` Hector Marco-Gisbert
  2015-03-27 12:14         ` Ingo Molnar
@ 2015-03-27 14:44         ` Borislav Petkov
  2015-03-27 15:06           ` Hector Marco-Gisbert
                             ` (2 more replies)
  2015-03-31 12:37         ` [tip:x86/mm] x86/mm: Improve AMD Bulldozer ASLR workaround tip-bot for Hector Marco-Gisbert
  2 siblings, 3 replies; 15+ messages in thread
From: Borislav Petkov @ 2015-03-27 14:44 UTC (permalink / raw)
  To: Hector Marco-Gisbert
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Kees Cook,
	Ismael Ripoll

On Fri, Mar 27, 2015 at 12:38:21PM +0100, Hector Marco-Gisbert wrote:
> A bug in the Linux ASLR implementation which affects some AMD processors
> has been found. The issue affects all Linux processes, even those that are
> not using shared libraries (i.e. statically compiled).

...

> Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
> Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>

How am I to interpret Ismael's SOB here?

Did he write the patch, did he create it, ...?

Because this SOB chain is incorrect in this form. We have only one
author per commit. If you want to accredit Ismael, you can say:

Originally-by: Ismael
Based-on-a-patch-by: Ismael
Suggested-by: ...

and so on.

Alternatively, you can describe his involvement in free text and thus
provide the proper attribution.

Pending this, I have this final version queued:

---
From: Hector Marco-Gisbert <hecmargi@upv.es>
Date: Fri, 27 Mar 2015 12:38:21 +0100
Subject: [PATCH] x86/mm: Improve AMD Bulldozer ASLR fix

The ASLR implementation needs to special-case AMD F15h processors by
clearing out bits [14:12] of the virtual address in order to avoid I$
cross invalidations and thus a performance penalty for certain workloads.
For details, see:

  dfb09f9b7ab0 ("x86, amd: Avoid cache aliasing penalties on AMD family 15h")

This special case reduces the mmapped files entropy by eight.

The following output is from a run on an AMD Opteron 62xx class CPU
under x86_64 Linux 4.0.0:

  $ for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
  b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  ...

Bits [12:14] are always 0, i.e. the address always ends in 0x8000 or
0x0000.

32-bit systems, as in the example above, are especially sensitive
to this issue because 32-bit randomness for VA space is 8 bits (see
mmap_rnd()). With the Bulldozer special case, this diminishes to only 32
different slots of mmap virtual addresses.

This patch randomizes per boot the three affected bits rather than
setting them to zero. Since all the shared pages have the same value
at bits [12..14], there are no cache aliasing problems. This value gets
generated during system boot and it is thus not known to a potential
remote attacker. Therefore, the impact from the Bulldozer workaround
gets diminished and ASLR randomness increased.

More details at:

  http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html

Original white paper by AMD dealing with the issue:

  http://developer.amd.com/wordpress/media/2012/10/SharedL1InstructionCacheonAMD15hCPU.pdf

Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86-ml <x86@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jan-Simon <dl9pf@gmx.de>
Cc: linux-fsdevel@vger.kernel.org
Cc: Kees Cook <keescook@chromium.org>
Link: http://lkml.kernel.org/r/1427456301-3764-1-git-send-email-hecmargi@upv.es
Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
Signed-off-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/elf.h   |  1 +
 arch/x86/kernel/cpu/amd.c    |  4 ++++
 arch/x86/kernel/sys_x86_64.c | 30 +++++++++++++++++++++++++++---
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ca3347a9dab5..bd292ce9be0a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -365,6 +365,7 @@ enum align_flags {
 struct va_alignment {
 	int flags;
 	unsigned long mask;
+	unsigned long bits;
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a220239cea65..ec6a61b21b41 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+
+		/* A random value per boot for bit slice [12:upper_bit) */
+		va_align.bits = get_random_int() & va_align.mask;
 	}
 }
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e27431a..10e0272d789a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -34,10 +34,26 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+/*
+ * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
+ * va_align.bits, [12:upper_bit), are set to a random value instead of
+ * zeroing them. This random value is computed once per boot. This form
+ * of ASLR is known as "per-boot ASLR".
+ *
+ * To achieve this, the random value is added to the info.align_offset
+ * value before calling vm_unmapped_area() or ORed directly to the
+ * address.
+ */
+static unsigned long get_align_bits(void)
+{
+	return va_align.bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_align_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -135,8 +151,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = begin;
 	info.high_limit = end;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	return vm_unmapped_area(&info);
 }
 
@@ -174,8 +194,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
-- 
2.3.3

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 14:44         ` Borislav Petkov
@ 2015-03-27 15:06           ` Hector Marco-Gisbert
  2015-03-28 13:10           ` Kees Cook
  2015-03-29  8:51           ` Ingo Molnar
  2 siblings, 0 replies; 15+ messages in thread
From: Hector Marco-Gisbert @ 2015-03-27 15:06 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: linux-kernel, akpm, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	x86, Alexander Viro, Jan-Simon, linux-fsdevel, Kees Cook,
	Ismael Ripoll


>> Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
>> Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
>
> How am I to interpret Ismael's SOB here?
>
> Did he write the patch, did he create it, ...?
>
> Because this SOB chain is incorrect in this form. We have only one
> author per commit. If you want to accredit Ismael, you can say:
>
> Originally-by: Ismael
> Based-on-a-patch-by: Ismael
> Suggested-by: ...
>
> and so on.


Mentored-by: Ismael Ripoll <iripoll@disca.upv.es>


Thank you.


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 14:44         ` Borislav Petkov
  2015-03-27 15:06           ` Hector Marco-Gisbert
@ 2015-03-28 13:10           ` Kees Cook
  2015-03-29  8:51           ` Ingo Molnar
  2 siblings, 0 replies; 15+ messages in thread
From: Kees Cook @ 2015-03-28 13:10 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: Hector Marco-Gisbert, LKML, Andrew Morton, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Ismael Ripoll

On Fri, Mar 27, 2015 at 7:44 AM, Borislav Petkov <bp@alien8.de> wrote:
> On Fri, Mar 27, 2015 at 12:38:21PM +0100, Hector Marco-Gisbert wrote:
>> A bug in Linux ASLR implementation which affects some AMD processors has been
>> found. The issue affects to all Linux process even if they are not using
>> shared libraries (statically compiled).
>
> ...
>
>> Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
>> Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
>
> How am I to interpret Ismael's SOB here?
>
> Did he write the patch, did he create it, ...?
>
> Because this SOB chain is incorrect in this form. We have only one
> author per commit. If you want to accredit Ismael, you can say:
>
> Originally-by: Ismael
> Based-on-a-patch-by: Ismael
> Suggested-by: ...
>
> and so on.
>
> Alternatively, you can describe in free text his involvement and thus
> have the proper attribution.
>
> Pending this, I have this final version queued:
>
> ---
> From: Hector Marco-Gisbert <hecmargi@upv.es>
> Date: Fri, 27 Mar 2015 12:38:21 +0100
> Subject: [PATCH] x86/mm: Improve AMD Bulldozer ASLR fix
>
> The ASLR implementation needs to special-case AMD F15h processors by
> clearing out bits [14:12] of the virtual address in order to avoid I$
> cross invalidations and thus a performance penalty for certain workloads.
> For details, see:
>
>   dfb09f9b7ab0 ("x86, amd: Avoid cache aliasing penalties on AMD family 15h")
>
> This special case reduces the mmapped files entropy by eight.
>
> The following output is from a run on an AMD Opteron 62xx class CPU
> under x86_64 Linux 4.0.0:
>
>   $ for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
>   b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
>   b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
>   b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
>   b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
>   b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
>   ...
>
> Bits [12:14] are always 0, i.e. the address always ends in 0x8000 or
> 0x0000.
>
> 32-bit systems, as in the example above, are especially sensitive
> to this issue because 32-bit randomness for VA space is 8 bits (see
> mmap_rnd()). With the Bulldozer special case, this diminishes to only 32
> different slots of mmap virtual addresses.
>
> This patch randomizes per boot the three affected bits rather than
> setting them to zero. Since all the shared pages have the same value
> at bits [12..14], there are no cache aliasing problems. This value gets
> generated during system boot and it is thus not known to a potential
> remote attacker. Therefore, the impact from the Bulldozer workaround
> gets diminished and ASLR randomness increased.
>
> More details at:
>
>   http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html
>
> Original white paper by AMD dealing with the issue:
>
>   http://developer.amd.com/wordpress/media/2012/10/SharedL1InstructionCacheonAMD15hCPU.pdf
>
> Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: x86-ml <x86@kernel.org>
> Cc: Alexander Viro <viro@zeniv.linux.org.uk>
> Cc: Jan-Simon <dl9pf@gmx.de>
> Cc: linux-fsdevel@vger.kernel.org
> Cc: Kees Cook <keescook@chromium.org>
> Link: http://lkml.kernel.org/r/1427456301-3764-1-git-send-email-hecmargi@upv.es
> Signed-off-by: Ismael Ripoll <iripoll@disca.upv.es>
> Signed-off-by: Borislav Petkov <bp@suse.de>

Acked-by: Kees Cook <keescook@chromium.org>

-Kees

> ---
>  arch/x86/include/asm/elf.h   |  1 +
>  arch/x86/kernel/cpu/amd.c    |  4 ++++
>  arch/x86/kernel/sys_x86_64.c | 30 +++++++++++++++++++++++++++---
>  3 files changed, 32 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
> index ca3347a9dab5..bd292ce9be0a 100644
> --- a/arch/x86/include/asm/elf.h
> +++ b/arch/x86/include/asm/elf.h
> @@ -365,6 +365,7 @@ enum align_flags {
>  struct va_alignment {
>         int flags;
>         unsigned long mask;
> +       unsigned long bits;
>  } ____cacheline_aligned;
>
>  extern struct va_alignment va_align;
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index a220239cea65..ec6a61b21b41 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -5,6 +5,7 @@
>
>  #include <linux/io.h>
>  #include <linux/sched.h>
> +#include <linux/random.h>
>  #include <asm/processor.h>
>  #include <asm/apic.h>
>  #include <asm/cpu.h>
> @@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
>
>                 va_align.mask     = (upperbit - 1) & PAGE_MASK;
>                 va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
> +
> +               /* A random value per boot for bit slice [12:upper_bit) */
> +               va_align.bits = get_random_int() & va_align.mask;
>         }
>  }
>
> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
> index 30277e27431a..10e0272d789a 100644
> --- a/arch/x86/kernel/sys_x86_64.c
> +++ b/arch/x86/kernel/sys_x86_64.c
> @@ -34,10 +34,26 @@ static unsigned long get_align_mask(void)
>         return va_align.mask;
>  }
>
> +/*
> + * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
> + * va_align.bits, [12:upper_bit), are set to a random value instead of
> + * zeroing them. This random value is computed once per boot. This form
> + * of ASLR is known as "per-boot ASLR".
> + *
> + * To achieve this, the random value is added to the info.align_offset
> + * value before calling vm_unmapped_area() or ORed directly to the
> + * address.
> + */
> +static unsigned long get_align_bits(void)
> +{
> +       return va_align.bits & get_align_mask();
> +}
> +
>  unsigned long align_vdso_addr(unsigned long addr)
>  {
>         unsigned long align_mask = get_align_mask();
> -       return (addr + align_mask) & ~align_mask;
> +       addr = (addr + align_mask) & ~align_mask;
> +       return addr | get_align_bits();
>  }
>
>  static int __init control_va_addr_alignment(char *str)
> @@ -135,8 +151,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>         info.length = len;
>         info.low_limit = begin;
>         info.high_limit = end;
> -       info.align_mask = filp ? get_align_mask() : 0;
> +       info.align_mask = 0;
>         info.align_offset = pgoff << PAGE_SHIFT;
> +       if (filp) {
> +               info.align_mask = get_align_mask();
> +               info.align_offset += get_align_bits();
> +       }
>         return vm_unmapped_area(&info);
>  }
>
> @@ -174,8 +194,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
>         info.length = len;
>         info.low_limit = PAGE_SIZE;
>         info.high_limit = mm->mmap_base;
> -       info.align_mask = filp ? get_align_mask() : 0;
> +       info.align_mask = 0;
>         info.align_offset = pgoff << PAGE_SHIFT;
> +       if (filp) {
> +               info.align_mask = get_align_mask();
> +               info.align_offset += get_align_bits();
> +       }
>         addr = vm_unmapped_area(&info);
>         if (!(addr & ~PAGE_MASK))
>                 return addr;
> --
> 2.3.3
>
> --
> Regards/Gruss,
>     Boris.
>
> ECO tip #101: Trim your mails when you reply.
> --



-- 
Kees Cook
Chrome OS Security


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-27 14:44         ` Borislav Petkov
  2015-03-27 15:06           ` Hector Marco-Gisbert
  2015-03-28 13:10           ` Kees Cook
@ 2015-03-29  8:51           ` Ingo Molnar
  2015-03-29  9:53             ` Borislav Petkov
  2 siblings, 1 reply; 15+ messages in thread
From: Ingo Molnar @ 2015-03-29  8:51 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: Hector Marco-Gisbert, linux-kernel, akpm, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Kees Cook, Ismael Ripoll


* Borislav Petkov <bp@alien8.de> wrote:

> From: Hector Marco-Gisbert <hecmargi@upv.es>
> Date: Fri, 27 Mar 2015 12:38:21 +0100
> Subject: [PATCH] x86/mm: Improve AMD Bulldozer ASLR fix
> 
> The ASLR implementation needs to special-case AMD F15h processors by
> clearing out bits [14:12] of the virtual address in order to avoid I$
> cross invalidations and thus performance penalty for certain workloads.
> For details, see:
> 
>   dfb09f9b7ab0 ("x86, amd: Avoid cache aliasing penalties on AMD family 15h")
> 
> This special case reduces the mmapped files entropy by eight.

s/reduces the mmapped file's entropy by 3 bits

Which does:

 - a grammar fix

 - measure it in bits, as later on we are talking about randomness in 
   bits as well.

Btw., does this limitation affect both executable and non-executable 
mmap()s? Because data mmap()s don't need this I$ related workaround, 
right? So we could relax it for data-mmap()s?

Thanks,

	Ingo


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-29  8:51           ` Ingo Molnar
@ 2015-03-29  9:53             ` Borislav Petkov
  2015-03-31  7:59               ` Ingo Molnar
  0 siblings, 1 reply; 15+ messages in thread
From: Borislav Petkov @ 2015-03-29  9:53 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Hector Marco-Gisbert, linux-kernel, akpm, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Kees Cook, Ismael Ripoll

On Sun, Mar 29, 2015 at 10:51:22AM +0200, Ingo Molnar wrote:
> s/reduces the mmapped file's entropy by 3 bits
> 
> Which does:
> 
>  - a grammar fix
> 
>  - measure it in bits, as later on we are talking about randomness in 
>    bits as well.

Fixed.

> Btw., does this limitation affect both executable and non-executable
> mmap()s?

Only executable mappings.

> Because data mmap()s don't need this I$ related workaround, right? So
> we could relax it for data-mmap()s?

Well, AFAIR, we wanted to keep this as unintrusive as possible. If
we're going to relax this, one way to solve it would be to pass down
@prot from do_mmap_pgoff() and friends to get_unmapped_area(), which
would mean touching other arches.

I'm not sure it is worth it...

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--


* Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix
  2015-03-29  9:53             ` Borislav Petkov
@ 2015-03-31  7:59               ` Ingo Molnar
  0 siblings, 0 replies; 15+ messages in thread
From: Ingo Molnar @ 2015-03-31  7:59 UTC (permalink / raw)
  To: Borislav Petkov
  Cc: Hector Marco-Gisbert, linux-kernel, akpm, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin, x86, Alexander Viro, Jan-Simon,
	linux-fsdevel, Kees Cook, Ismael Ripoll


* Borislav Petkov <bp@alien8.de> wrote:

> On Sun, Mar 29, 2015 at 10:51:22AM +0200, Ingo Molnar wrote:
> > s/reduces the mmapped file's entropy by 3 bits
> > 
> > Which does:
> > 
> >  - a grammar fix
> > 
> >  - measure it in bits, as later on we are talking about randomness in 
> >    bits as well.
> 
> Fixed.
> 
> > Btw., does this limitation affect both executable and non-executable
> > mmap()s?
> 
> Only executable mappings.
> 
> > Because data mmap()s don't need this I$ related workaround, right? So
> > we could relax it for data-mmap()s?
> 
> Well, AFAIR, we wanted to keep this as unintrusive as possible. If
> we're going to relax this, one way to solve it would be to pass down
> @prot from do_mmap_pgoff() and friends to get_unmapped_area(), which
> would mean touching other arches.
> 
> I'm not sure it is worth it...

Well, we could define arch_get_unmapped_area_exec(), and if an arch 
defines it, the mm code would call into it.

But yeah, it would add a function pointer to mm_struct, which I'm not 
sure is worth it.
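
A rough sketch of how such a hook could look (purely illustrative, this
was never implemented in this form; the hook name, the extra @prot
parameter and the config symbol are all hypothetical):

  /*
   * Generic code would prefer an exec-aware arch hook when one exists
   * and @prot (passed down from do_mmap_pgoff() and friends) requests
   * an executable mapping; everything else keeps the current path.
   */
  unsigned long
  get_unmapped_area(struct file *file, unsigned long addr,
                    unsigned long len, unsigned long pgoff,
                    unsigned long flags, unsigned long prot)
  {
          unsigned long (*get_area)(struct file *, unsigned long,
                                    unsigned long, unsigned long,
                                    unsigned long);

          get_area = current->mm->get_unmapped_area;
  #ifdef HAVE_ARCH_GET_UNMAPPED_AREA_EXEC
          if (prot & PROT_EXEC)
                  get_area = arch_get_unmapped_area_exec;
  #endif
          return get_area(file, addr, len, pgoff, flags);
  }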

Thanks,

	Ingo


* [tip:x86/mm] x86/mm: Improve AMD Bulldozer ASLR workaround
  2015-03-27 11:38       ` Hector Marco-Gisbert
  2015-03-27 12:14         ` Ingo Molnar
  2015-03-27 14:44         ` Borislav Petkov
@ 2015-03-31 12:37         ` tip-bot for Hector Marco-Gisbert
  2 siblings, 0 replies; 15+ messages in thread
From: tip-bot for Hector Marco-Gisbert @ 2015-03-31 12:37 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: hecmargi, viro, akpm, tglx, dl9pf, hpa, keescook, bp, iripoll,
	mingo, linux-kernel

Commit-ID:  4e26d11f52684dc8b1632a8cfe450cb5197a8464
Gitweb:     http://git.kernel.org/tip/4e26d11f52684dc8b1632a8cfe450cb5197a8464
Author:     Hector Marco-Gisbert <hecmargi@upv.es>
AuthorDate: Fri, 27 Mar 2015 12:38:21 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 31 Mar 2015 10:01:17 +0200

x86/mm: Improve AMD Bulldozer ASLR workaround

The ASLR implementation needs to special-case AMD F15h processors by
clearing out bits [14:12] of the virtual address in order to avoid I$
cross invalidations and thus a performance penalty for certain workloads.
For details, see:

  dfb09f9b7ab0 ("x86, amd: Avoid cache aliasing penalties on AMD family 15h")

This special case reduces the mmapped file's entropy by 3 bits.

The following output is from a run on an AMD Opteron 62xx class CPU
under x86_64 Linux 4.0.0:

  $ for i in `seq 1 10`; do cat /proc/self/maps | grep "r-xp.*libc" ; done
  b7588000-b7736000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b7570000-b771e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b75d0000-b777e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b75b0000-b775e000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  b7578000-b7726000 r-xp 00000000 00:01 4924       /lib/i386-linux-gnu/libc.so.6
  ...

Bits [12:14] are always 0, i.e. the address always ends in 0x8000 or
0x0000.

32-bit systems, as in the example above, are especially sensitive
to this issue because 32-bit randomness for VA space is 8 bits (see
mmap_rnd()). With the Bulldozer special case, this diminishes to only 32
different slots of mmap virtual addresses.

This patch randomizes per boot the three affected bits rather than
setting them to zero. Since all the shared pages have the same value
at bits [12..14], there are no cache aliasing problems. This value gets
generated during system boot and it is thus not known to a potential
remote attacker. Therefore, the impact from the Bulldozer workaround
gets diminished and ASLR randomness increased.

More details at:

  http://hmarco.org/bugs/AMD-Bulldozer-linux-ASLR-weakness-reducing-mmaped-files-by-eight.html

Original white paper by AMD dealing with the issue:

  http://developer.amd.com/wordpress/media/2012/10/SharedL1InstructionCacheonAMD15hCPU.pdf

Mentored-by: Ismael Ripoll <iripoll@disca.upv.es>
Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan-Simon <dl9pf@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-fsdevel@vger.kernel.org
Link: http://lkml.kernel.org/r/1427456301-3764-1-git-send-email-hecmargi@upv.es
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/include/asm/elf.h   |  1 +
 arch/x86/kernel/cpu/amd.c    |  4 ++++
 arch/x86/kernel/sys_x86_64.c | 30 +++++++++++++++++++++++++++---
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ca3347a..bd292ce 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -365,6 +365,7 @@ enum align_flags {
 struct va_alignment {
 	int flags;
 	unsigned long mask;
+	unsigned long bits;
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a220239..ec6a61b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+
+		/* A random value per boot for bit slice [12:upper_bit) */
+		va_align.bits = get_random_int() & va_align.mask;
 	}
 }
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e2..10e0272 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -34,10 +34,26 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+/*
+ * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
+ * va_align.bits, [12:upper_bit), are set to a random value instead of
+ * zeroing them. This random value is computed once per boot. This form
+ * of ASLR is known as "per-boot ASLR".
+ *
+ * To achieve this, the random value is added to the info.align_offset
+ * value before calling vm_unmapped_area() or ORed directly to the
+ * address.
+ */
+static unsigned long get_align_bits(void)
+{
+	return va_align.bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_align_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -135,8 +151,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = begin;
 	info.high_limit = end;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	return vm_unmapped_area(&info);
 }
 
@@ -174,8 +194,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;

