All of lore.kernel.org
 help / color / mirror / Atom feed
* [Patch] Enable SMEP CPU feature support for XEN itself
@ 2011-06-01 13:20 Yang, Wei Y
  2011-06-01 14:34 ` Keir Fraser
                   ` (2 more replies)
  0 siblings, 3 replies; 20+ messages in thread
From: Yang, Wei Y @ 2011-06-01 13:20 UTC (permalink / raw)
  To: xen-devel


Intel's new CPUs support SMEP (Supervisor Mode Execution Protection). SMEP prevents
the kernel from executing code that resides in user-mode pages. The updated Intel SDM describes
this CPU feature; the document will be published soon.

This patch enables SMEP in Xen to protect the Xen hypervisor from executing pv guest
code, and kills any pv guest that triggers an SMEP fault.

 Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
 Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
 Signed-off-by: Li, Xin <xin.li@intel.com>

---
 arch/x86/cpu/common.c        |   16 ++++++++++++++++
 arch/x86/traps.c             |   43 +++++++++++++++++++++++++++++++++++++++++--
 include/asm-x86/cpufeature.h |    5 ++++-
 include/asm-x86/processor.h  |    1 +
 4 files changed, 62 insertions(+), 3 deletions(-)

diff -r d4f6310f1ef5 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c	Wed Jun 01 11:11:43 2011 +0100
+++ b/xen/arch/x86/cpu/common.c	Wed Jun 01 19:53:52 2011 +0800
@@ -28,6 +28,9 @@
 integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
 unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
 integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+/* nosmep: if true, Intel SMEP is disabled. */
+static bool_t __initdata disable_smep;
+boolean_param("nosmep", disable_smep);
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
@@ -222,6 +225,17 @@
 	c->x86_capability[4] = cap4;
 }
 
+static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
+{
+	if ( cpu_has(c, X86_FEATURE_SMEP) ) {
+		if( unlikely(disable_smep) ) {
+			setup_clear_cpu_cap(X86_FEATURE_SMEP);
+			clear_in_cr4(X86_CR4_SMEP);
+		} else
+			set_in_cr4(X86_CR4_SMEP);
+	}
+}
+
 void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl, capability, excap, ebx;
@@ -268,6 +282,8 @@
 		c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
 	}
 
+	setup_smep(c);
+
 	early_intel_workaround(c);
 
 #ifdef CONFIG_X86_HT
diff -r d4f6310f1ef5 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Wed Jun 01 11:11:43 2011 +0100
+++ b/xen/arch/x86/traps.c	Wed Jun 01 19:53:52 2011 +0800
@@ -1195,8 +1195,16 @@
     if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
          (l3e_get_flags(l3e) & disallowed_flags) )
         return 0;
-    if ( l3e_get_flags(l3e) & _PAGE_PSE )
+    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
+        /* SMEP fault error code 10001b */
+        if ( (error_code & PFEC_insn_fetch) &&
+             !(error_code & PFEC_user_mode) &&
+             cpu_has_smep &&
+             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
+            return 2;
+
         return 1;
+    }
 #endif
 #endif
 
@@ -1207,8 +1215,21 @@
     if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
          (l2e_get_flags(l2e) & disallowed_flags) )
         return 0;
-    if ( l2e_get_flags(l2e) & _PAGE_PSE )
+    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
+        /* SMEP fault error code 10001b */
+        if ( (error_code & PFEC_insn_fetch) &&
+             !(error_code & PFEC_user_mode) &&
+             cpu_has_smep &&
+             (_PAGE_USER &
+#if CONFIG_PAGING_LEVELS >= 4
+              l4e_get_flags(l4e) &
+              l3e_get_flags(l3e) &
+#endif
+              l2e_get_flags(l2e)) )
+            return 2;
+
         return 1;
+    }
 
     l1t = map_domain_page(mfn);
     l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
@@ -1218,6 +1239,18 @@
          (l1e_get_flags(l1e) & disallowed_flags) )
         return 0;
 
+    /* SMEP fault error code 10001b */
+    if ( (error_code & PFEC_insn_fetch) &&
+         !(error_code & PFEC_user_mode) &&
+         cpu_has_smep &&
+         (_PAGE_USER &
+#if CONFIG_PAGING_LEVELS >= 4
+          l4e_get_flags(l4e) &
+          l3e_get_flags(l3e) &
+#endif
+          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
+        return 2;
+
     return 1;
 }
 
@@ -1235,6 +1268,12 @@
     is_spurious = __spurious_page_fault(addr, error_code);
     local_irq_restore(flags);
 
+    if ( is_spurious == 2 ) {
+        printk("SMEP fault at address %lx, crashing current domain %d\n",
+               addr, current->domain->domain_id);
+        domain_crash_synchronous();
+    }
+
     return is_spurious;
 }
 
diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Wed Jun 01 11:11:43 2011 +0100
+++ b/xen/include/asm-x86/cpufeature.h	Wed Jun 01 19:53:52 2011 +0800
@@ -141,8 +141,9 @@
 #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs */
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
 #define X86_FEATURE_FSGSBASE	(7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP	(7*32+ 7) /* Supervisor Mode Execution Protection */
 
 #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
 #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
@@ -201,6 +202,8 @@
 #define cpu_has_fsgsbase	boot_cpu_has(X86_FEATURE_FSGSBASE)
 #endif
 
+#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
+
 #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
                                  && boot_cpu_has(X86_FEATURE_FFXSR))
 
diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Wed Jun 01 11:11:43 2011 +0100
+++ b/xen/include/asm-x86/processor.h	Wed Jun 01 19:53:52 2011 +0800
@@ -85,6 +85,7 @@
 #define X86_CR4_SMXE		0x4000  /* enable SMX */
 #define X86_CR4_FSGSBASE	0x10000 /* enable {rd,wr}{fs,gs}base */
 #define X86_CR4_OSXSAVE	0x40000 /* enable XSAVE/XRSTOR */
+#define X86_CR4_SMEP		0x100000/* enable SMEP */
 
 /*
  * Trap/fault mnemonics.
 

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 13:20 [Patch] Enable SMEP CPU feature support for XEN itself Yang, Wei Y
@ 2011-06-01 14:34 ` Keir Fraser
  2011-06-01 14:50   ` Li, Xin
  2011-06-01 15:17 ` Jan Beulich
  2011-06-01 15:26 ` Keir Fraser
  2 siblings, 1 reply; 20+ messages in thread
From: Keir Fraser @ 2011-06-01 14:34 UTC (permalink / raw)
  To: Yang, Wei Y, xen-devel; +Cc: Li, Xin

On 01/06/2011 14:20, "Yang, Wei Y" <wei.y.yang@intel.com> wrote:

> 
> Intel new CPU supports SMEP (Supervisor Mode Execution Protection). SMEP
> prevents
> kernel from executing code in user. Updated Intel SDM describes this CPU
> feature.
> The document will be published soon.

Then I can hardly be expected to review this patch. I have no idea what it
does!

 -- Keir

> This patch enables SMEP in Xen to protect Xen hypervisor from executing pv
> guest
> code, and kills a pv guest triggering SMEP fault.
> 
>  Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
>  Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
>  Signed-off-by: Li, Xin <xin.li@intel.com>
> 
> ---
>  arch/x86/cpu/common.c        |   16 ++++++++++++++++
>  arch/x86/traps.c             |   43
> +++++++++++++++++++++++++++++++++++++++++--
>  include/asm-x86/cpufeature.h |    5 ++++-
>  include/asm-x86/processor.h  |    1 +
>  4 files changed, 62 insertions(+), 3 deletions(-)
> 
> diff -r d4f6310f1ef5 xen/arch/x86/cpu/common.c
> --- a/xen/arch/x86/cpu/common.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/cpu/common.c Wed Jun 01 19:53:52 2011 +0800
> @@ -28,6 +28,9 @@
>  integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
>  unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
>  integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> +/* nosmep: if true, Intel SMEP is disabled. */
> +static bool_t __initdata disable_smep;
> +boolean_param("nosmep", disable_smep);
>  
>  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
>  
> @@ -222,6 +225,17 @@
> c->x86_capability[4] = cap4;
>  }
>  
> +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> +{
> + if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> +  if( unlikely(disable_smep) ) {
> +   setup_clear_cpu_cap(X86_FEATURE_SMEP);
> +   clear_in_cr4(X86_CR4_SMEP);
> +  } else
> +   set_in_cr4(X86_CR4_SMEP);
> + }
> +}
> +
>  void __cpuinit generic_identify(struct cpuinfo_x86 * c)
>  {
> u32 tfms, xlvl, capability, excap, ebx;
> @@ -268,6 +282,8 @@
> c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
> }
>  
> + setup_smep(c);
> +
> early_intel_workaround(c);
>  
>  #ifdef CONFIG_X86_HT
> diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> --- a/xen/arch/x86/traps.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/traps.c Wed Jun 01 19:53:52 2011 +0800
> @@ -1195,8 +1195,16 @@
>      if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
>           (l3e_get_flags(l3e) & disallowed_flags) )
>          return 0;
> -    if ( l3e_get_flags(l3e) & _PAGE_PSE )
> +    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  #endif
>  #endif
>  
> @@ -1207,8 +1215,21 @@
>      if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
>           (l2e_get_flags(l2e) & disallowed_flags) )
>          return 0;
> -    if ( l2e_get_flags(l2e) & _PAGE_PSE )
> +    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +              l4e_get_flags(l4e) &
> +              l3e_get_flags(l3e) &
> +#endif
> +              l2e_get_flags(l2e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  
>      l1t = map_domain_page(mfn);
>      l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> @@ -1218,6 +1239,18 @@
>           (l1e_get_flags(l1e) & disallowed_flags) )
>          return 0;
>  
> +    /* SMEP fault error code 10001b */
> +    if ( (error_code & PFEC_insn_fetch) &&
> +         !(error_code & PFEC_user_mode) &&
> +         cpu_has_smep &&
> +         (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +          l4e_get_flags(l4e) &
> +          l3e_get_flags(l3e) &
> +#endif
> +          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> +        return 2;
> +
>      return 1;
>  }
>  
> @@ -1235,6 +1268,12 @@
>      is_spurious = __spurious_page_fault(addr, error_code);
>      local_irq_restore(flags);
>  
> +    if ( is_spurious == 2 ) {
> +        printk("SMEP fault at address %lx, crashing current domain %d\n",
> +               addr, current->domain->domain_id);
> +        domain_crash_synchronous();
> +    }
> +
>      return is_spurious;
>  }
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
> --- a/xen/include/asm-x86/cpufeature.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/cpufeature.h Wed Jun 01 19:53:52 2011 +0800
> @@ -141,8 +141,9 @@
>  #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
>  #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs
> */
>  
> -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
> +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
>  #define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
> +#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection */
>  
>  #define cpu_has(c, bit)  test_bit(bit, (c)->x86_capability)
>  #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
> @@ -201,6 +202,8 @@
>  #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE)
>  #endif
>  
> +#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
> +
>  #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
> \
>                                   && boot_cpu_has(X86_FEATURE_FFXSR))
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
> --- a/xen/include/asm-x86/processor.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/processor.h Wed Jun 01 19:53:52 2011 +0800
> @@ -85,6 +85,7 @@
>  #define X86_CR4_SMXE  0x4000  /* enable SMX */
>  #define X86_CR4_FSGSBASE 0x10000 /* enable {rd,wr}{fs,gs}base */
>  #define X86_CR4_OSXSAVE 0x40000 /* enable XSAVE/XRSTOR */
> +#define X86_CR4_SMEP  0x100000/* enable SMEP */
>  
>  /*
>   * Trap/fault mnemonics.
>  
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 14:34 ` Keir Fraser
@ 2011-06-01 14:50   ` Li, Xin
  0 siblings, 0 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-01 14:50 UTC (permalink / raw)
  To: Keir Fraser, Yang, Wei Y, xen-devel

> > Intel new CPU supports SMEP (Supervisor Mode Execution Protection). SMEP
> > prevents
> > kernel from executing code in user. Updated Intel SDM describes this CPU
> > feature.
> > The document will be published soon.
> 
> Then I can hardly be expected to review this patch. I have no idea what it
> does!

Let me explain it a bit: when a CPU running in kernel mode tries to fetch and execute
code whose user bit is set in the page-table entries at every level, it triggers an
instruction-fetch page fault. One of its uses is to prevent an application from exploiting
a kernel null-pointer dereference to gain root privilege.
Thanks!
-Xin

> 
>  -- Keir
> 
> > This patch enables SMEP in Xen to protect Xen hypervisor from executing pv
> > guest
> > code, and kills a pv guest triggering SMEP fault.
> >
> >  Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
> >  Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
> >  Signed-off-by: Li, Xin <xin.li@intel.com>
> >
> > ---
> >  arch/x86/cpu/common.c        |   16 ++++++++++++++++
> >  arch/x86/traps.c             |   43
> > +++++++++++++++++++++++++++++++++++++++++--
> >  include/asm-x86/cpufeature.h |    5 ++++-
> >  include/asm-x86/processor.h  |    1 +
> >  4 files changed, 62 insertions(+), 3 deletions(-)
> >
> > diff -r d4f6310f1ef5 xen/arch/x86/cpu/common.c
> > --- a/xen/arch/x86/cpu/common.c Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/arch/x86/cpu/common.c Wed Jun 01 19:53:52 2011 +0800
> > @@ -28,6 +28,9 @@
> >  integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
> >  unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
> >  integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> > +/* nosmep: if true, Intel SMEP is disabled. */
> > +static bool_t __initdata disable_smep;
> > +boolean_param("nosmep", disable_smep);
> >
> >  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
> >
> > @@ -222,6 +225,17 @@
> > c->x86_capability[4] = cap4;
> >  }
> >
> > +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> > +{
> > + if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> > +  if( unlikely(disable_smep) ) {
> > +   setup_clear_cpu_cap(X86_FEATURE_SMEP);
> > +   clear_in_cr4(X86_CR4_SMEP);
> > +  } else
> > +   set_in_cr4(X86_CR4_SMEP);
> > + }
> > +}
> > +
> >  void __cpuinit generic_identify(struct cpuinfo_x86 * c)
> >  {
> > u32 tfms, xlvl, capability, excap, ebx;
> > @@ -268,6 +282,8 @@
> > c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
> > }
> >
> > + setup_smep(c);
> > +
> > early_intel_workaround(c);
> >
> >  #ifdef CONFIG_X86_HT
> > diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> > --- a/xen/arch/x86/traps.c Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/arch/x86/traps.c Wed Jun 01 19:53:52 2011 +0800
> > @@ -1195,8 +1195,16 @@
> >      if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
> >           (l3e_get_flags(l3e) & disallowed_flags) )
> >          return 0;
> > -    if ( l3e_get_flags(l3e) & _PAGE_PSE )
> > +    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> > +        /* SMEP fault error code 10001b */
> > +        if ( (error_code & PFEC_insn_fetch) &&
> > +             !(error_code & PFEC_user_mode) &&
> > +             cpu_has_smep &&
> > +             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> > +            return 2;
> > +
> >          return 1;
> > +    }
> >  #endif
> >  #endif
> >
> > @@ -1207,8 +1215,21 @@
> >      if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
> >           (l2e_get_flags(l2e) & disallowed_flags) )
> >          return 0;
> > -    if ( l2e_get_flags(l2e) & _PAGE_PSE )
> > +    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> > +        /* SMEP fault error code 10001b */
> > +        if ( (error_code & PFEC_insn_fetch) &&
> > +             !(error_code & PFEC_user_mode) &&
> > +             cpu_has_smep &&
> > +             (_PAGE_USER &
> > +#if CONFIG_PAGING_LEVELS >= 4
> > +              l4e_get_flags(l4e) &
> > +              l3e_get_flags(l3e) &
> > +#endif
> > +              l2e_get_flags(l2e)) )
> > +            return 2;
> > +
> >          return 1;
> > +    }
> >
> >      l1t = map_domain_page(mfn);
> >      l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> > @@ -1218,6 +1239,18 @@
> >           (l1e_get_flags(l1e) & disallowed_flags) )
> >          return 0;
> >
> > +    /* SMEP fault error code 10001b */
> > +    if ( (error_code & PFEC_insn_fetch) &&
> > +         !(error_code & PFEC_user_mode) &&
> > +         cpu_has_smep &&
> > +         (_PAGE_USER &
> > +#if CONFIG_PAGING_LEVELS >= 4
> > +          l4e_get_flags(l4e) &
> > +          l3e_get_flags(l3e) &
> > +#endif
> > +          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> > +        return 2;
> > +
> >      return 1;
> >  }
> >
> > @@ -1235,6 +1268,12 @@
> >      is_spurious = __spurious_page_fault(addr, error_code);
> >      local_irq_restore(flags);
> >
> > +    if ( is_spurious == 2 ) {
> > +        printk("SMEP fault at address %lx, crashing current domain %d\n",
> > +               addr, current->domain->domain_id);
> > +        domain_crash_synchronous();
> > +    }
> > +
> >      return is_spurious;
> >  }
> >
> > diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
> > --- a/xen/include/asm-x86/cpufeature.h Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/include/asm-x86/cpufeature.h Wed Jun 01 19:53:52 2011 +0800
> > @@ -141,8 +141,9 @@
> >  #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
> >  #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID
> leafs
> > */
> >
> > -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
> > +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
> >  #define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions
> */
> > +#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection
> */
> >
> >  #define cpu_has(c, bit)  test_bit(bit, (c)->x86_capability)
> >  #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
> > @@ -201,6 +202,8 @@
> >  #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE)
> >  #endif
> >
> > +#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
> > +
> >  #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor ==
> X86_VENDOR_AMD)
> > \
> >                                   &&
> boot_cpu_has(X86_FEATURE_FFXSR))
> >
> > diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
> > --- a/xen/include/asm-x86/processor.h Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/include/asm-x86/processor.h Wed Jun 01 19:53:52 2011 +0800
> > @@ -85,6 +85,7 @@
> >  #define X86_CR4_SMXE  0x4000  /* enable SMX */
> >  #define X86_CR4_FSGSBASE 0x10000 /* enable {rd,wr}{fs,gs}base */
> >  #define X86_CR4_OSXSAVE 0x40000 /* enable XSAVE/XRSTOR */
> > +#define X86_CR4_SMEP  0x100000/* enable SMEP */
> >
> >  /*
> >   * Trap/fault mnemonics.
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xensource.com
> > http://lists.xensource.com/xen-devel
> 

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 13:20 [Patch] Enable SMEP CPU feature support for XEN itself Yang, Wei Y
  2011-06-01 14:34 ` Keir Fraser
@ 2011-06-01 15:17 ` Jan Beulich
  2011-06-01 15:23   ` Ian Campbell
                     ` (2 more replies)
  2011-06-01 15:26 ` Keir Fraser
  2 siblings, 3 replies; 20+ messages in thread
From: Jan Beulich @ 2011-06-01 15:17 UTC (permalink / raw)
  To: Wei Y Yang; +Cc: xen-devel

>>> On 01.06.11 at 15:20, "Yang, Wei Y" <wei.y.yang@intel.com> wrote:
> --- a/xen/arch/x86/cpu/common.c	Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/cpu/common.c	Wed Jun 01 19:53:52 2011 +0800
> @@ -28,6 +28,9 @@
>  integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
>  unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
>  integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> +/* nosmep: if true, Intel SMEP is disabled. */
> +static bool_t __initdata disable_smep;

An __initdata variable used in ...

> +boolean_param("nosmep", disable_smep);
>  
>  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
>  
> @@ -222,6 +225,17 @@
>  	c->x86_capability[4] = cap4;
>  }
>  
> +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> +{
> +	if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> +		if( unlikely(disable_smep) ) {

... a __cpuinit function?

> +			setup_clear_cpu_cap(X86_FEATURE_SMEP);
> +			clear_in_cr4(X86_CR4_SMEP);
> +		} else
> +			set_in_cr4(X86_CR4_SMEP);

Anyway, the whole thing is overkill - {set,clear}_in_cr4() write
the updated bits to mmu_cr4_features, and these get loaded
on secondary CPUs *before* you have any chance of looking
at the CPUID bits. As with everything else, it's assumed that
APs don't have less features than the BP, and hence you only
need to set_in_cr4() once (on the BP). And then the function
can be __init.

> +	}
> +}
> +
>  void __cpuinit generic_identify(struct cpuinfo_x86 * c)
>  {
>  	u32 tfms, xlvl, capability, excap, ebx;
> @@ -268,6 +282,8 @@

Would also be really helpful if you patch was generated with -p.

>  		c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
>  	}
>  
> +	setup_smep(c);
> +
>  	early_intel_workaround(c);
>  
>  #ifdef CONFIG_X86_HT
> diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> --- a/xen/arch/x86/traps.c	Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/traps.c	Wed Jun 01 19:53:52 2011 +0800
> @@ -1195,8 +1195,16 @@
>      if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
>           (l3e_get_flags(l3e) & disallowed_flags) )
>          return 0;
> -    if ( l3e_get_flags(l3e) & _PAGE_PSE )
> +    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  #endif
>  #endif
>  
> @@ -1207,8 +1215,21 @@
>      if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
>           (l2e_get_flags(l2e) & disallowed_flags) )
>          return 0;
> -    if ( l2e_get_flags(l2e) & _PAGE_PSE )
> +    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +              l4e_get_flags(l4e) &
> +              l3e_get_flags(l3e) &
> +#endif
> +              l2e_get_flags(l2e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  
>      l1t = map_domain_page(mfn);
>      l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> @@ -1218,6 +1239,18 @@
>           (l1e_get_flags(l1e) & disallowed_flags) )
>          return 0;
>  
> +    /* SMEP fault error code 10001b */
> +    if ( (error_code & PFEC_insn_fetch) &&
> +         !(error_code & PFEC_user_mode) &&
> +         cpu_has_smep &&
> +         (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +          l4e_get_flags(l4e) &
> +          l3e_get_flags(l3e) &
> +#endif
> +          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> +        return 2;

The further down I get the uglier this looks. Can't you simply
accumulate the user bit into a separate variable? That way the
compiler also doesn't need to keep around all the l[1234]e
variables.

Jan

> +
>      return 1;
>  }
>  
> @@ -1235,6 +1268,12 @@
>      is_spurious = __spurious_page_fault(addr, error_code);
>      local_irq_restore(flags);
>  
> +    if ( is_spurious == 2 ) {
> +        printk("SMEP fault at address %lx, crashing current domain %d\n",
> +               addr, current->domain->domain_id);
> +        domain_crash_synchronous();
> +    }
> +
>      return is_spurious;
>  }
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
> --- a/xen/include/asm-x86/cpufeature.h	Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/cpufeature.h	Wed Jun 01 19:53:52 2011 +0800
> @@ -141,8 +141,9 @@
>  #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
>  #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID 
> leafs */
>  
> -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
> +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
>  #define X86_FEATURE_FSGSBASE	(7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
> +#define X86_FEATURE_SMEP	(7*32+ 7) /* Supervisor Mode Execution Protection 
> */
>  
>  #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
>  #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
> @@ -201,6 +202,8 @@
>  #define cpu_has_fsgsbase	boot_cpu_has(X86_FEATURE_FSGSBASE)
>  #endif
>  
> +#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
> +
>  #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == 
> X86_VENDOR_AMD) \
>                                   && boot_cpu_has(X86_FEATURE_FFXSR))
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
> --- a/xen/include/asm-x86/processor.h	Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/processor.h	Wed Jun 01 19:53:52 2011 +0800
> @@ -85,6 +85,7 @@
>  #define X86_CR4_SMXE		0x4000  /* enable SMX */
>  #define X86_CR4_FSGSBASE	0x10000 /* enable {rd,wr}{fs,gs}base */
>  #define X86_CR4_OSXSAVE	0x40000 /* enable XSAVE/XRSTOR */
> +#define X86_CR4_SMEP		0x100000/* enable SMEP */
>  
>  /*
>   * Trap/fault mnemonics.
>  
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com 
> http://lists.xensource.com/xen-devel 

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 15:17 ` Jan Beulich
@ 2011-06-01 15:23   ` Ian Campbell
  2011-06-02  4:20   ` Li, Xin
  2011-06-02  7:45   ` Li, Xin
  2 siblings, 0 replies; 20+ messages in thread
From: Ian Campbell @ 2011-06-01 15:23 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Wei Y Yang, xen-devel

On Wed, 2011-06-01 at 16:17 +0100, Jan Beulich wrote:
> @@ -268,6 +282,8 @@
> 
> Would also be really helpful if you patch was generated with -p. 

Add this to your ~/.hgrc to make this the default:

[diff]
showfunc = True


Ian.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 13:20 [Patch] Enable SMEP CPU feature support for XEN itself Yang, Wei Y
  2011-06-01 14:34 ` Keir Fraser
  2011-06-01 15:17 ` Jan Beulich
@ 2011-06-01 15:26 ` Keir Fraser
  2011-06-01 16:15   ` Li, Xin
  2 siblings, 1 reply; 20+ messages in thread
From: Keir Fraser @ 2011-06-01 15:26 UTC (permalink / raw)
  To: Yang, Wei Y, xen-devel; +Cc: Li, Xin

On 01/06/2011 14:20, "Yang, Wei Y" <wei.y.yang@intel.com> wrote:

> 
> Intel new CPU supports SMEP (Supervisor Mode Execution Protection). SMEP
> prevents
> kernel from executing code in user. Updated Intel SDM describes this CPU
> feature.
> The document will be published soon.
> 
> This patch enables SMEP in Xen to protect Xen hypervisor from executing pv
> guest code,

Well not really. In the case that *Xen* execution triggers SMEP, you should
crash.

> and kills a pv guest triggering SMEP fault.

Should only occur when the guest kernel triggers the SMEP.

Basically you need to pull your check out of spurious_page_fault() and into
the two callers, because their responses should differ (one crashes the
guest, the other crashes the hypervisor).

Please define an enumeration for the return codes from spurious_pf, rather
than using magic numbers.

 -- Keir

>  Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
>  Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
>  Signed-off-by: Li, Xin <xin.li@intel.com>
> 
> ---
>  arch/x86/cpu/common.c        |   16 ++++++++++++++++
>  arch/x86/traps.c             |   43
> +++++++++++++++++++++++++++++++++++++++++--
>  include/asm-x86/cpufeature.h |    5 ++++-
>  include/asm-x86/processor.h  |    1 +
>  4 files changed, 62 insertions(+), 3 deletions(-)
> 
> diff -r d4f6310f1ef5 xen/arch/x86/cpu/common.c
> --- a/xen/arch/x86/cpu/common.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/cpu/common.c Wed Jun 01 19:53:52 2011 +0800
> @@ -28,6 +28,9 @@
>  integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
>  unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
>  integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> +/* nosmep: if true, Intel SMEP is disabled. */
> +static bool_t __initdata disable_smep;
> +boolean_param("nosmep", disable_smep);
>  
>  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
>  
> @@ -222,6 +225,17 @@
> c->x86_capability[4] = cap4;
>  }
>  
> +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> +{
> + if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> +  if( unlikely(disable_smep) ) {
> +   setup_clear_cpu_cap(X86_FEATURE_SMEP);
> +   clear_in_cr4(X86_CR4_SMEP);
> +  } else
> +   set_in_cr4(X86_CR4_SMEP);
> + }
> +}
> +
>  void __cpuinit generic_identify(struct cpuinfo_x86 * c)
>  {
> u32 tfms, xlvl, capability, excap, ebx;
> @@ -268,6 +282,8 @@
> c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
> }
>  
> + setup_smep(c);
> +
> early_intel_workaround(c);
>  
>  #ifdef CONFIG_X86_HT
> diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> --- a/xen/arch/x86/traps.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/traps.c Wed Jun 01 19:53:52 2011 +0800
> @@ -1195,8 +1195,16 @@
>      if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
>           (l3e_get_flags(l3e) & disallowed_flags) )
>          return 0;
> -    if ( l3e_get_flags(l3e) & _PAGE_PSE )
> +    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  #endif
>  #endif
>  
> @@ -1207,8 +1215,21 @@
>      if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
>           (l2e_get_flags(l2e) & disallowed_flags) )
>          return 0;
> -    if ( l2e_get_flags(l2e) & _PAGE_PSE )
> +    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> +        /* SMEP fault error code 10001b */
> +        if ( (error_code & PFEC_insn_fetch) &&
> +             !(error_code & PFEC_user_mode) &&
> +             cpu_has_smep &&
> +             (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +              l4e_get_flags(l4e) &
> +              l3e_get_flags(l3e) &
> +#endif
> +              l2e_get_flags(l2e)) )
> +            return 2;
> +
>          return 1;
> +    }
>  
>      l1t = map_domain_page(mfn);
>      l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> @@ -1218,6 +1239,18 @@
>           (l1e_get_flags(l1e) & disallowed_flags) )
>          return 0;
>  
> +    /* SMEP fault error code 10001b */
> +    if ( (error_code & PFEC_insn_fetch) &&
> +         !(error_code & PFEC_user_mode) &&
> +         cpu_has_smep &&
> +         (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> +          l4e_get_flags(l4e) &
> +          l3e_get_flags(l3e) &
> +#endif
> +          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> +        return 2;
> +
>      return 1;
>  }
>  
> @@ -1235,6 +1268,12 @@
>      is_spurious = __spurious_page_fault(addr, error_code);
>      local_irq_restore(flags);
>  
> +    if ( is_spurious == 2 ) {
> +        printk("SMEP fault at address %lx, crashing current domain %d\n",
> +               addr, current->domain->domain_id);
> +        domain_crash_synchronous();
> +    }
> +
>      return is_spurious;
>  }
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
> --- a/xen/include/asm-x86/cpufeature.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/cpufeature.h Wed Jun 01 19:53:52 2011 +0800
> @@ -141,8 +141,9 @@
>  #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
>  #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs
> */
>  
> -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
> +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
>  #define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
> +#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection */
>  
>  #define cpu_has(c, bit)  test_bit(bit, (c)->x86_capability)
>  #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
> @@ -201,6 +202,8 @@
>  #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE)
>  #endif
>  
> +#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
> +
>  #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
> \
>                                   && boot_cpu_has(X86_FEATURE_FFXSR))
>  
> diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
> --- a/xen/include/asm-x86/processor.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/processor.h Wed Jun 01 19:53:52 2011 +0800
> @@ -85,6 +85,7 @@
>  #define X86_CR4_SMXE  0x4000  /* enable SMX */
>  #define X86_CR4_FSGSBASE 0x10000 /* enable {rd,wr}{fs,gs}base */
>  #define X86_CR4_OSXSAVE 0x40000 /* enable XSAVE/XRSTOR */
> +#define X86_CR4_SMEP  0x100000/* enable SMEP */
>  
>  /*
>   * Trap/fault mnemonics.
>  
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 15:26 ` Keir Fraser
@ 2011-06-01 16:15   ` Li, Xin
  2011-06-01 20:43     ` Keir Fraser
  0 siblings, 1 reply; 20+ messages in thread
From: Li, Xin @ 2011-06-01 16:15 UTC (permalink / raw)
  To: Keir Fraser, Yang, Wei Y, xen-devel

> > This patch enables SMEP in Xen to protect Xen hypervisor from executing pv
> > guest code,
> 
> Well not really. In the case that *Xen* execution triggers SMEP, you should
> crash.

You don't expect Xen can trigger SMEP? Somehow I agree, but in case there is
any null pointer bug in Xen, an evil pv guest could easily get control of the system.

> 
> > and kills a pv guest triggering SMEP fault.
> 
> Should only occur when the guest kernel triggers the SMEP.

Given the kernel's code base size, it's much easier for malicious applications to exploit
security holes in the kernel.  But unluckily SMEP doesn't apply to ring 3, which the
x86_64 pv kernel runs in.  It's wiser to use HVM :)

> Basically you need to pull your check out of spurious_page_fault() and into
> the two callers, because their responses should differ (one crashes the
> guest, the other crashes the hypervisor).
> Please define an enumeration for the return codes from spurious_pf, rather
> than using magic numbers.

Will do.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 16:15   ` Li, Xin
@ 2011-06-01 20:43     ` Keir Fraser
  2011-06-01 22:52       ` Li, Xin
  0 siblings, 1 reply; 20+ messages in thread
From: Keir Fraser @ 2011-06-01 20:43 UTC (permalink / raw)
  To: Li, Xin, Yang, Wei Y, xen-devel

On 01/06/2011 17:15, "Li, Xin" <xin.li@intel.com> wrote:

>>> This patch enables SMEP in Xen to protect Xen hypervisor from executing pv
>>> guest code,
>> 
>> Well not really. In the case that *Xen* execution triggers SMEP, you should
>> crash.
> 
> You don't expect Xen can trigger SMEP? somehow I agree, but in case there is
> any null pointer in Xen, an evil pv guest can easily get control of the
> system.

Of course. I don't disagree there can be bugs in Xen. :-)

>> 
>>> and kills a pv guest triggering SMEP fault.
>> 
>> Should only occur when the guest kernel triggers the SMEP.
> 
> According to code base size, it's much easier for malicious applications to
> explore
> security holes in kernel.  But unluckily SMEP doesn't apply to the ring 3
> where
> x86_64 pv kernel runs on.  It's wiser to use HVM :)

Yep, but 32-bit guests can still benefit.

>> Basically you need to pull your check out of spurious_page_fault() and into
>> the two callers, because their responses should differ (one crashes the
>> guest, the other crashes the hypervisor).
>> Please define an enumeration for the return codes from spurious_pf, rather
>> than using magic numbers.
> 
> Will do.

Thanks.

 - Keir

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 20:43     ` Keir Fraser
@ 2011-06-01 22:52       ` Li, Xin
  2011-06-02  6:25         ` Keir Fraser
  0 siblings, 1 reply; 20+ messages in thread
From: Li, Xin @ 2011-06-01 22:52 UTC (permalink / raw)
  To: Keir Fraser, Yang, Wei Y, xen-devel

> >>> and kills a pv guest triggering SMEP fault.
> >>
> >> Should only occur when the guest kernel triggers the SMEP.
> >
> > According to code base size, it's much easier for malicious applications to
> > explore
> > security holes in kernel.  But unluckily SMEP doesn't apply to the ring 3
> > where
> > x86_64 pv kernel runs on.  It's wiser to use HVM :)
> 
> Yep, but 32-bit guests can still benefit.

Can we know a guest will be 32bit or 64bit before it boots?
Code will be like
	xc_pv_cpuid_policy()
	{
        case 7, 0:
            if ( 64 bit pv guest )
                 disallow smep;
	}
I don't know if we can distinguish that when creating guest.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 15:17 ` Jan Beulich
  2011-06-01 15:23   ` Ian Campbell
@ 2011-06-02  4:20   ` Li, Xin
  2011-06-02  7:45   ` Li, Xin
  2 siblings, 0 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-02  4:20 UTC (permalink / raw)
  To: Jan Beulich, Yang, Wei Y; +Cc: xen-devel

> > --- a/xen/arch/x86/cpu/common.c	Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/arch/x86/cpu/common.c	Wed Jun 01 19:53:52 2011 +0800
> > @@ -28,6 +28,9 @@
> >  integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
> >  unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
> >  integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> > +/* nosmep: if true, Intel SMEP is disabled. */
> > +static bool_t __initdata disable_smep;
> 
> An __initdata variable used in ...

a mistake copied from native patch :) we'll change it to __cpuinitdata

> 
> > +boolean_param("nosmep", disable_smep);
> >
> >  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
> >
> > @@ -222,6 +225,17 @@
> >  	c->x86_capability[4] = cap4;
> >  }
> >
> > +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> > +{
> > +	if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> > +		if( unlikely(disable_smep) ) {
> 
> ... a __cpuinit function?

If change disable_smep to __cpuinitdata, this should be ok.


> 
> > +			setup_clear_cpu_cap(X86_FEATURE_SMEP);
> > +			clear_in_cr4(X86_CR4_SMEP);
> > +		} else
> > +			set_in_cr4(X86_CR4_SMEP);
> 
> Anyway, the whole thing is overkill - {set,clear}_in_cr4() write
> the updated bits to mmu_cr4_features, and these get loaded
> on secondary CPUs *before* you have any chance of looking
> at the CPUID bits. As with everything else, it's assumed that
> APs don't have less features than the BP, and hence you only
> need to set_in_cr4() once (on the BP). And then the function
> can be __init.
>

Do you mean? 
        if ( cpu_has(c, X86_FEATURE_SMEP) )
                if( likely(!disable_smep) ) {
                        mmu_cr4_features |= X86_CR4_SMEP;
                        set_in_cr4(0);
                } else
                        setup_clear_cpu_cap(X86_FEATURE_SMEP);

Sounds good ... but the code will be harder to read, as it implicitly set smep?
Also where to put setup_smep thus it's only called in BP?

> > +	}
> > +}
> > +
> >  void __cpuinit generic_identify(struct cpuinfo_x86 * c)
> >  {
> >  	u32 tfms, xlvl, capability, excap, ebx;
> > @@ -268,6 +282,8 @@
> 
> Would also be really helpful if you patch was generated with -p.
> 
> >  		c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
> >  	}
> >
> > +	setup_smep(c);
> > +
> >  	early_intel_workaround(c);
> >
> >  #ifdef CONFIG_X86_HT
> > diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> > --- a/xen/arch/x86/traps.c	Wed Jun 01 11:11:43 2011 +0100
> > +++ b/xen/arch/x86/traps.c	Wed Jun 01 19:53:52 2011 +0800
> > @@ -1195,8 +1195,16 @@
> >      if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
> >           (l3e_get_flags(l3e) & disallowed_flags) )
> >          return 0;
> > -    if ( l3e_get_flags(l3e) & _PAGE_PSE )
> > +    if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> > +        /* SMEP fault error code 10001b */
> > +        if ( (error_code & PFEC_insn_fetch) &&
> > +             !(error_code & PFEC_user_mode) &&
> > +             cpu_has_smep &&
> > +             (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> > +            return 2;
> > +
> >          return 1;
> > +    }
> >  #endif
> >  #endif
> >
> > @@ -1207,8 +1215,21 @@
> >      if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
> >           (l2e_get_flags(l2e) & disallowed_flags) )
> >          return 0;
> > -    if ( l2e_get_flags(l2e) & _PAGE_PSE )
> > +    if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> > +        /* SMEP fault error code 10001b */
> > +        if ( (error_code & PFEC_insn_fetch) &&
> > +             !(error_code & PFEC_user_mode) &&
> > +             cpu_has_smep &&
> > +             (_PAGE_USER &
> > +#if CONFIG_PAGING_LEVELS >= 4
> > +              l4e_get_flags(l4e) &
> > +              l3e_get_flags(l3e) &
> > +#endif
> > +              l2e_get_flags(l2e)) )
> > +            return 2;
> > +
> >          return 1;
> > +    }
> >
> >      l1t = map_domain_page(mfn);
> >      l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> > @@ -1218,6 +1239,18 @@
> >           (l1e_get_flags(l1e) & disallowed_flags) )
> >          return 0;
> >
> > +    /* SMEP fault error code 10001b */
> > +    if ( (error_code & PFEC_insn_fetch) &&
> > +         !(error_code & PFEC_user_mode) &&
> > +         cpu_has_smep &&
> > +         (_PAGE_USER &
> > +#if CONFIG_PAGING_LEVELS >= 4
> > +          l4e_get_flags(l4e) &
> > +          l3e_get_flags(l3e) &
> > +#endif
> > +          l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> > +        return 2;
> 
> The further down I get the uglier this looks. Can't you simply
> accumulate the user bit into a separate variable? That way the
> compiler also doesn't need to keep around all the l[1234]e
> variables.

At the beginning we did accumulate the user bit into a separate variable. However,
SMEP faults hardly ever happen, while we would keep accumulating the user bit no matter
whether it's a spurious fault or not — and even spurious faults are rare, I guess.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 22:52       ` Li, Xin
@ 2011-06-02  6:25         ` Keir Fraser
  2011-06-02 10:07           ` Li, Xin
  0 siblings, 1 reply; 20+ messages in thread
From: Keir Fraser @ 2011-06-02  6:25 UTC (permalink / raw)
  To: Li, Xin, Yang, Wei Y, xen-devel

On 01/06/2011 23:52, "Li, Xin" <xin.li@intel.com> wrote:

>>>>> and kills a pv guest triggering SMEP fault.
>>>> 
>>>> Should only occur when the guest kernel triggers the SMEP.
>>> 
>>> According to code base size, it's much easier for malicious applications to
>>> explore
>>> security holes in kernel.  But unluckily SMEP doesn't apply to the ring 3
>>> where
>>> x86_64 pv kernel runs on.  It's wiser to use HVM :)
>> 
>> Yep, but 32-bit guests can still benefit.
> 
> Can we know a guest will be 32bit or 64bit before it boots?
> Code will be like
> xc_pv_cpuid_policy()
> {
>         case 7, 0:
>             if ( 64 bit pv guest )
>                  disallow smep;
> }
> I don't know if we can distinguish that when creating guest.

Of course you can. See the guest_64bit flag already used in
xc_pv_cpuid_policy()!

However, given that the guest cannot influence whether SMEP is
enabled/disabled, perhaps it makes sense to always hide the feature? Also we
should unconditionally be hiding the CPUID feature in any case when Xen does
not support SMEP (because disabled on command line, or in the stable
branches without the feature patch applied) as otherwise guest can detect
the feature and will crash when it tries to enable the feature in CR4. This
is why it's a bad idea that we blacklist CPUID features for PV guests rather
than whitelist them. I will apply such a patch to all trees now.

 -- Keir

> Thanks!
> -Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-01 15:17 ` Jan Beulich
  2011-06-01 15:23   ` Ian Campbell
  2011-06-02  4:20   ` Li, Xin
@ 2011-06-02  7:45   ` Li, Xin
  2 siblings, 0 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-02  7:45 UTC (permalink / raw)
  To: Jan Beulich, Yang, Wei Y; +Cc: xen-devel

> > > +			setup_clear_cpu_cap(X86_FEATURE_SMEP);
> > > +			clear_in_cr4(X86_CR4_SMEP);
> > > +		} else
> > > +			set_in_cr4(X86_CR4_SMEP);
> >
> > Anyway, the whole thing is overkill - {set,clear}_in_cr4() write
> > the updated bits to mmu_cr4_features, and these get loaded
> > on secondary CPUs *before* you have any chance of looking
> > at the CPUID bits. As with everything else, it's assumed that
> > APs don't have less features than the BP, and hence you only
> > need to set_in_cr4() once (on the BP). And then the function
> > can be __init.
> >
> 
> Do you mean?
>         if ( cpu_has(c, X86_FEATURE_SMEP) )
>                 if( likely(!disable_smep) ) {
>                         mmu_cr4_features |= X86_CR4_SMEP;
>                         set_in_cr4(0);
>                 } else
>                         setup_clear_cpu_cap(X86_FEATURE_SMEP);
> 
> Sounds good ... but the code will be harder to read, as it implicitly set smep?
> Also where to put setup_smep thus it's only called in BP?
> 
But it is a good idea to set X86_CR4_SMEP in mmu_cr4_features when SMEP
is available.  Thus:

1) for PV, we can make patch like pv_guest_cr4_to_real_cr4
#define pv_guest_cr4_to_real_cr4(v)                         \
    (((v)->arch.pv_vcpu.ctrlreg[4]                          \
      | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP))    \
      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)          \
      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))          \
      & ~X86_CR4_DE)
when set cr4.

2) For HVM, we don't need to explicitly add SMEP when write to HOST_CR4.

Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
  2011-06-02  6:25         ` Keir Fraser
@ 2011-06-02 10:07           ` Li, Xin
  0 siblings, 0 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-02 10:07 UTC (permalink / raw)
  To: Keir Fraser, Yang, Wei Y, xen-devel

> > I don't know if we can distinguish that when creating guest.
> 
> Of course you can. See the guest_64bit flag already used in
> xc_pv_cpuid_policy()!
> 
> However, given that the guest cannot influence whether SMEP is
> enabled/disabled, perhaps it makes sense to always hide the feature? Also we

SMEP can protect the Xen hypervisor and 32-bit guest kernels from applications, but as
32-bit guests run in ring 1, they can still exploit a null pointer bug in Xen, although that's rare.

I vaguely remember Windows disallows execution from first page (or 4M?) of
virtual address space. Does Xen disallow PV guest kernel executing from there?

> should unconditionally be hiding the CPUID feature in any case when Xen does
> not support SMEP (because disabled on command line, or in the stable
> branches without the feature patch applied) as otherwise guest can detect
> the feature and will crash when it tries to enable the feature in CR4. This
> is why it's a bad idea that we blacklist CPUID features for PV guests rather
> than whitelist them. I will apply such a patch to all trees now.

You're right.  We will rebase the patch on your new code.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-03 11:54       ` Li, Xin
@ 2011-06-03 12:34         ` Keir Fraser
  0 siblings, 0 replies; 20+ messages in thread
From: Keir Fraser @ 2011-06-03 12:34 UTC (permalink / raw)
  To: Li, Xin, Jan Beulich, Yang, Wei Y; +Cc: xen-devel

On 03/06/2011 12:54, "Li, Xin" <xin.li@intel.com> wrote:

>>> The whole point of setup_clear_cpu_cap() is that it only needs to run on the
>>> BP and the change gets picked up by every AP automatically as it boots.
>> 
>> Yeah, and we can't do leaf 7.0 detect in generic_identify. Or will again set
>> SMEP in
>> capabilities, but it should be ok.
> 
> I tried to not do leaf 7.0 detect on AP booting code, but system behaves
> abnormally
> (not hang, but "su -" never returns, after just revert the change, it runs
> well).
> 
> While at this point I want to focus on SMEP logic, we can make the
> improvements,
> to move such initialization to BP code only, later.  Also it makes no much
> sense to only
> move disable_smep that way, we should move all such variables.

Just get another rev of the patches out and we'll iterate if necessary.

 -- Keir

> Thanks!
> -Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-02 22:49     ` Li, Xin
@ 2011-06-03 11:54       ` Li, Xin
  2011-06-03 12:34         ` Keir Fraser
  0 siblings, 1 reply; 20+ messages in thread
From: Li, Xin @ 2011-06-03 11:54 UTC (permalink / raw)
  To: Li, Xin, Keir Fraser, Jan Beulich, Yang, Wei Y; +Cc: xen-devel

> > The whole point of setup_clear_cpu_cap() is that it only needs to run on the
> > BP and the change gets picked up by every AP automatically as it boots.
> 
> Yeah, and we can't do leaf 7.0 detect in generic_identify. Or will again set SMEP in
> capabilities, but it should be ok.

I tried to not do leaf 7.0 detect on AP booting code, but system behaves abnormally
(not hang, but "su -" never returns, after just revert the change, it runs well).

While at this point I want to focus on the SMEP logic; we can make the improvement of
moving such initialization to BP-only code later.  Also, it doesn't make much sense to only
move disable_smep that way — we should move all such variables.

Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-02 19:24   ` Keir Fraser
@ 2011-06-02 22:49     ` Li, Xin
  2011-06-03 11:54       ` Li, Xin
  0 siblings, 1 reply; 20+ messages in thread
From: Li, Xin @ 2011-06-02 22:49 UTC (permalink / raw)
  To: Keir Fraser, Jan Beulich, Yang, Wei Y; +Cc: xen-devel

> >> } else
> >>> setup_clear_cpu_cap(X86_FEATURE_SMEP);
> >
> > This needs to be done on APs too.  Thus I think we still need define
> > setup_smep as __cpuinit.
> 
> The whole point of setup_clear_cpu_cap() is that it only needs to run on the
> BP and the change gets picked up by every AP automatically as it boots.

Yeah, and we can't do leaf 7.0 detect in generic_identify. Or will again set SMEP in capabilities, but it should be ok.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-02 14:36 ` Li, Xin
  2011-06-02 15:05   ` Li, Xin
@ 2011-06-02 19:24   ` Keir Fraser
  2011-06-02 22:49     ` Li, Xin
  1 sibling, 1 reply; 20+ messages in thread
From: Keir Fraser @ 2011-06-02 19:24 UTC (permalink / raw)
  To: Li, Xin, Jan Beulich, Yang, Wei Y; +Cc: xen-devel

On 02/06/2011 15:36, "Li, Xin" <xin.li@intel.com> wrote:

>> } else
>>> setup_clear_cpu_cap(X86_FEATURE_SMEP);
> 
> This needs to be done on APs too.  Thus I think we still need define
> setup_smep as __cpuinit.

The whole point of setup_clear_cpu_cap() is that it only needs to run on the
BP and the change gets picked up by every AP automatically as it boots.

 -- Keir

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-02 14:36 ` Li, Xin
@ 2011-06-02 15:05   ` Li, Xin
  2011-06-02 19:24   ` Keir Fraser
  1 sibling, 0 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-02 15:05 UTC (permalink / raw)
  To: Li, Xin, Jan Beulich, Yang, Wei Y; +Cc: xen-devel

 
> >} else
> >>setup_clear_cpu_cap(X86_FEATURE_SMEP);
> 
> This needs to be done on APs too.  Thus I think we still need define setup_smep as
> __cpuinit.
This applies to the BP only, and actually all feature checks are against the BP even if an AP has a corresponding bit set.  So your suggestion should work.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for  XEN itself
  2011-06-02 13:29 Jan Beulich
@ 2011-06-02 14:36 ` Li, Xin
  2011-06-02 15:05   ` Li, Xin
  2011-06-02 19:24   ` Keir Fraser
  0 siblings, 2 replies; 20+ messages in thread
From: Li, Xin @ 2011-06-02 14:36 UTC (permalink / raw)
  To: Jan Beulich, Yang, Wei Y; +Cc: xen-devel

>>mmu_cr4_features |= X86_CR4_SMEP;

>Why?

I replied in another reply to you, but just repeat here:
But it is a good idea to set X86_CR4_SMEP in mmu_cr4_features when SMEP
is available.  Thus:

1) for PV, we can make patch like pv_guest_cr4_to_real_cr4
#define pv_guest_cr4_to_real_cr4(v)                         \
    (((v)->arch.pv_vcpu.ctrlreg[4]                          \
      | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP))    \
      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)          \
      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))          \
      & ~X86_CR4_DE)
when set cr4.

2) For HVM, we don't need to explicitly add SMEP when write to HOST_CR4.


>>set_in_cr4(0);

>set_in_cr4(X86_CR4_SMEP) does exactly what you need.

Yes, but once we have X86_CR4_SMEP in mmu_cr4_features, set_in_cr4(0) does
the same thing except looks ugly.

>} else
>>setup_clear_cpu_cap(X86_FEATURE_SMEP);

This needs to be done on APs too.  Thus I think we still need define setup_smep as __cpuinit.

>>At the beginning we did accumulate the user bit into a separate variable. However
>>SMEP faults hardly happen while we keep accumulating user bit no matter it's a
>>spurious fault or not, and even spurious faults are rare I guess.

>Remember that we're going through this function for almost every page
>fault happening in Xen, and also for the majority of those originating
>from certain pv guests (when they have suppress_spurious_page_faults
>set).

>Also, my comment was to a large part aiming at better legibility of the
>code you add.

Yes, for legibility we may change it back.
Thanks!
-Xin

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [Patch] Enable SMEP CPU feature support for XEN itself
@ 2011-06-02 13:29 Jan Beulich
  2011-06-02 14:36 ` Li, Xin
  0 siblings, 1 reply; 20+ messages in thread
From: Jan Beulich @ 2011-06-02 13:29 UTC (permalink / raw)
  To: wei.y.yang, xin.li; +Cc: xen-devel


[-- Attachment #1.1: Type: text/plain, Size: 2642 bytes --]

>>> "Li, Xin"  06/02/11 6:20 AM >>>
>> > +boolean_param("nosmep", disable_smep);
>> >
>> >  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
>> >
>> > @@ -222,6 +225,17 @@
>> >      c->x86_capability[4] = cap4;
>> >  }
>> >
>> > +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
>> > +{
>> > +    if ( cpu_has(c, X86_FEATURE_SMEP) ) {
>> > +        if( unlikely(disable_smep) ) {
>> 
>> ... a __cpuinit function?
>
>If change disable_smep to __cpuinitdata, this should be ok.

You would be okay, but as I wrote further down both are really
only needed on the BP.

>> > +            setup_clear_cpu_cap(X86_FEATURE_SMEP);
>> > +            clear_in_cr4(X86_CR4_SMEP);
>> > +        } else
>> > +            set_in_cr4(X86_CR4_SMEP);
>> 
>> Anyway, the whole thing is overkill - {set,clear}_in_cr4() write
>> the updated bits to mmu_cr4_features, and these get loaded
>> on secondary CPUs *before* you have any chance of looking
>> at the CPUID bits. As with everything else, it's assumed that
>> APs don't have less features than the BP, and hence you only
>> need to set_in_cr4() once (on the BP). And then the function
>> can be __init.
>>
>
>Do you mean? 
>if ( cpu_has(c, X86_FEATURE_SMEP) )
>if( likely(!disable_smep) ) {
>mmu_cr4_features |= X86_CR4_SMEP;

Why?

>set_in_cr4(0);

set_in_cr4(X86_CR4_SMEP) does exactly what you need.

>} else
>setup_clear_cpu_cap(X86_FEATURE_SMEP);
>
>Sounds good ... but the code will be harder to read, as it implicitly set smep?
>Also where to put setup_smep thus it's only called in BP?

early_cpu_detect() would seem to be the most logical place, though
it doesn't have all the x86_capabilities[] fields set up yet. The BP-only
part at the end of identify_cpu() would also be a possible place.

trap_init() would be another possible (and reasonably logical) place.

>> The further down I get the uglier this looks. Can't you simply
>> accumulate the user bit into a separate variable? That way the
>> compiler also doesn't need to keep around all the l[1234]e
>> variables.
>
>At the beginning we did accumulate the user bit into a separate variable. However
>SMEP faults hardly happen while we keep accumulating user bit no matter it's a
>spurious fault or not, and even spurious faults are rare I guess.

Remember that we're going through this function for almost every page
fault happening in Xen, and also for the majority of those originating
from certain pv guests (when they have suppress_spurious_page_faults
set).

Also, my comment was to a large part aiming at better legibility of the
code you add.

Jan


[-- Attachment #1.2: HTML --]
[-- Type: text/html, Size: 3650 bytes --]

[-- Attachment #2: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2011-06-03 12:34 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-06-01 13:20 [Patch] Enable SMEP CPU feature support for XEN itself Yang, Wei Y
2011-06-01 14:34 ` Keir Fraser
2011-06-01 14:50   ` Li, Xin
2011-06-01 15:17 ` Jan Beulich
2011-06-01 15:23   ` Ian Campbell
2011-06-02  4:20   ` Li, Xin
2011-06-02  7:45   ` Li, Xin
2011-06-01 15:26 ` Keir Fraser
2011-06-01 16:15   ` Li, Xin
2011-06-01 20:43     ` Keir Fraser
2011-06-01 22:52       ` Li, Xin
2011-06-02  6:25         ` Keir Fraser
2011-06-02 10:07           ` Li, Xin
2011-06-02 13:29 Jan Beulich
2011-06-02 14:36 ` Li, Xin
2011-06-02 15:05   ` Li, Xin
2011-06-02 19:24   ` Keir Fraser
2011-06-02 22:49     ` Li, Xin
2011-06-03 11:54       ` Li, Xin
2011-06-03 12:34         ` Keir Fraser

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.