* [PATCH] X86: cpuid faulting feature enable
@ 2011-07-01 14:32 Liu, Jinsong
  2011-07-01 15:48 ` Jan Beulich
  2011-07-01 21:31 ` Keir Fraser
  0 siblings, 2 replies; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-01 14:32 UTC (permalink / raw)
  To: Keir Fraser, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

X86: cpuid faulting feature enable

Latest Intel processors add a cpuid faulting feature. This patch adds support for cpuid faulting in Xen.
Like cpuid spoofing, cpuid faulting is mainly used to support live migration. When CPL > 0, the cpuid instruction raises a #GP fault, and the VMM then emulates execution of the cpuid instruction. Guest software therefore sees the values chosen by the VMM.
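
For illustration, the emulation path amounts to the following sketch (pv_cpuid() and cpuid_count() are existing Xen helpers; the function below is a simplified, hypothetical rendering, not code from this patch):

	/* A ring-3 CPUID in a PV guest raises #GP(0); Xen's #GP handler
	 * reaches emulate_privileged_op(), whose new 0xa2 case calls
	 * pv_cpuid() to return policy-filtered leaf values. */
	static void pv_cpuid_sketch(struct cpu_user_regs *regs)
	{
		unsigned int a = regs->eax, b, c = regs->ecx, d;

		cpuid_count(a, c, &a, &b, &c, &d);  /* raw hardware leaf */
		/* ... apply the per-domain CPUID policy/masking here ... */
		regs->eax = a; regs->ebx = b; regs->ecx = c; regs->edx = d;
	}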

Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>

diff -r 593d51c5f4ee xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/common.c	Fri Jul 01 19:04:41 2011 +0800
@@ -603,6 +603,18 @@ void __init early_cpu_init(void)
 #endif
 	early_cpu_detect();
 }
+
+static int __init cpuid_faulting_init(void)
+{
+	if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 
+		cpu_has_cpuid_faulting ) {
+		cpuid_faulting_flip = intel_cpuid_faulting_flip;
+	}
+
+	return 0;
+}
+__initcall(cpuid_faulting_init);
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
diff -r 593d51c5f4ee xen/arch/x86/cpu/cpu.h
--- a/xen/arch/x86/cpu/cpu.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/cpu.h	Fri Jul 01 19:04:41 2011 +0800
@@ -30,4 +30,4 @@ extern void generic_identify(struct cpui
 extern void generic_identify(struct cpuinfo_x86 * c);
 
 extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
+extern void intel_cpuid_faulting_flip(unsigned int enable);
diff -r 593d51c5f4ee xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/intel.c	Fri Jul 01 19:04:41 2011 +0800
@@ -24,6 +24,39 @@
  */
 struct movsl_mask movsl_mask __read_mostly;
 #endif
+
+static unsigned int intel_cpuid_faulting_enumerate(void)
+{
+	uint32_t hi, lo;
+	struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()];
+
+	/*
+	* Currently only one type of intel processor support cpuid faulting.
+	* FIXME when needed in the future.
+	*/
+	if (!((c->x86 == 6) && (c->x86_model == 58) && (c->x86_mask == 2)))
+		return 0;
+
+	rdmsr(MSR_INTEL_PLATFORM_INFO, lo, hi);
+	if (lo & (1 << 31))
+		return 1;
+
+	return 0;
+}
+
+void intel_cpuid_faulting_flip(unsigned int enable)
+{
+	uint32_t hi, lo;
+
+	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+	if (enable)
+		lo |= 1;
+	else
+		lo &= ~1;
+	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+
+	per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
+}
 
 /*
  * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
@@ -194,7 +227,10 @@ static void __devinit init_intel(struct 
 		detect_ht(c);
 	}
 
-	set_cpuidmask(c);
+	if (intel_cpuid_faulting_enumerate())
+		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+	else
+		set_cpuidmask(c);
 
 	/* Work around errata */
 	Intel_errata_workarounds(c);
diff -r 593d51c5f4ee xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/domain.c	Fri Jul 01 19:04:41 2011 +0800
@@ -63,6 +63,9 @@
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
+DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
+
+void (*cpuid_faulting_flip)(unsigned int enable);
 
 static void default_idle(void);
 static void default_dead_idle(void);
@@ -1680,6 +1683,15 @@ void context_switch(struct vcpu *prev, s
             load_LDT(next);
             load_segments(next);
         }
+
+	if ( cpuid_faulting_flip )
+	{
+		unsigned int enable;
+
+		enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
+		if ( enable ^ this_cpu(cpuid_faulting_enabled) )
+			cpuid_faulting_flip(enable);
+	}
     }
 
     context_saved(prev);
diff -r 593d51c5f4ee xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/traps.c	Fri Jul 01 19:04:41 2011 +0800
@@ -2113,11 +2113,13 @@ static int emulate_privileged_op(struct 
 
  twobyte_opcode:
     /*
-     * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
-     * are executable only from guest kernel mode (virtual ring 0).
+     * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
+     * and CPUID (0xa2), are executable only from guest kernel mode 
+     * (virtual ring 0).
      */
     opcode = insn_fetch(u8, code_base, eip, code_limit);
-    if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
+    if ( !guest_kernel_mode(v, regs) && 
+        (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
         goto fail;
 
     if ( lock && (opcode & ~3) != 0x20 )
@@ -2550,6 +2552,10 @@ static int emulate_privileged_op(struct 
             regs->edx = (uint32_t)(msr_content >> 32);
             break;
         }
+        break;
+
+    case 0xa2: /* CPUID */
+        pv_cpuid(regs);
         break;
 
     default:
diff -r 593d51c5f4ee xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/cpufeature.h	Fri Jul 01 19:04:41 2011 +0800
@@ -79,6 +79,7 @@
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 #define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
 #define X86_FEATURE_XTOPOLOGY    (3*32+13) /* cpu topology enum extensions */
+#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -175,6 +176,7 @@
 #define cpu_has_page1gb		0
 #define cpu_has_efer		(boot_cpu_data.x86_capability[1] & 0x20100800)
 #define cpu_has_fsgsbase	0
+#define cpu_has_cpuid_faulting  0
 #else /* __x86_64__ */
 #define cpu_has_vme		0
 #define cpu_has_de		1
@@ -201,6 +203,7 @@
 #define cpu_has_page1gb		boot_cpu_has(X86_FEATURE_PAGE1GB)
 #define cpu_has_efer		1
 #define cpu_has_fsgsbase	boot_cpu_has(X86_FEATURE_FSGSBASE)
+#define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
 #endif
 
 #define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
diff -r 593d51c5f4ee xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/msr-index.h	Fri Jul 01 19:04:41 2011 +0800
@@ -155,11 +155,6 @@
 #define MSR_P6_PERFCTR1			0x000000c2
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
-
-/* MSRs for Intel cpuid feature mask */
-#define MSR_INTEL_CPUID_FEATURE_MASK	0x00000478
-#define MSR_INTEL_CPUID1_FEATURE_MASK	0x00000130
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
 
 /* MSRs & bits used for VMX enabling */
 #define MSR_IA32_VMX_BASIC                      0x480
@@ -492,6 +487,15 @@
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
 
+/* Intel cpuid spoofing MSRs */
+#define MSR_INTEL_CPUID_FEATURE_MASK	0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK	0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
+
+/* Intel cpuid faulting MSRs */
+#define MSR_INTEL_PLATFORM_INFO		0x000000ce
+#define MSR_INTEL_MISC_FEATURES_ENABLES	0x00000140
+
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
diff -r 593d51c5f4ee xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/processor.h	Fri Jul 01 19:04:41 2011 +0800
@@ -192,6 +192,10 @@ extern struct cpuinfo_x86 cpu_data[];
 #define cpu_data (&boot_cpu_data)
 #define current_cpu_data boot_cpu_data
 #endif
+
+extern DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
+
+extern void (*cpuid_faulting_flip)(unsigned int enable);
 
 extern u64 host_pat;
 extern int phys_proc_id[NR_CPUS];
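
For reference, the guest-visible behaviour can be checked from ring 3 in a PV domU with a small test program (hypothetical, not part of the patch); with faulting active, the CPUID below traps to Xen and returns the policy values rather than the raw hardware leaf:

	#include <stdio.h>
	#include <cpuid.h>	/* GCC's __get_cpuid() */

	int main(void)
	{
		unsigned int a, b, c, d;

		if (__get_cpuid(1, &a, &b, &c, &d))
			printf("leaf 1: ecx=%08x edx=%08x\n", c, d);
		return 0;
	}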

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 14:32 [PATCH] X86: cpuid faulting feature enable Liu, Jinsong
@ 2011-07-01 15:48 ` Jan Beulich
  2011-07-01 16:07   ` Keir Fraser
  2011-07-01 17:48   ` Liu, Jinsong
  2011-07-01 21:31 ` Keir Fraser
  1 sibling, 2 replies; 19+ messages in thread
From: Jan Beulich @ 2011-07-01 15:48 UTC (permalink / raw)
  To: Jinsong Liu; +Cc: xen-devel, Kevin Tian, Keir Fraser, Haitao Shan, Xin Li

>>> On 01.07.11 at 16:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> X86: cpuid faulting feature enable
> 
> Latest Intel processors add a cpuid faulting feature. This patch adds
> support for cpuid faulting in Xen.
> Like cpuid spoofing, cpuid faulting is mainly used to support live migration.
> When CPL > 0, the cpuid instruction raises a #GP fault, and the VMM then
> emulates execution of the cpuid instruction. Guest software therefore sees
> the values chosen by the VMM.
> 
> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
> 
> diff -r 593d51c5f4ee xen/arch/x86/cpu/common.c
> --- a/xen/arch/x86/cpu/common.c	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/arch/x86/cpu/common.c	Fri Jul 01 19:04:41 2011 +0800
> @@ -603,6 +603,18 @@ void __init early_cpu_init(void)
>  #endif
>  	early_cpu_detect();
>  }
> +
> +static int __init cpuid_faulting_init(void)
> +{
> +	if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 
> +		cpu_has_cpuid_faulting ) {
> +		cpuid_faulting_flip = intel_cpuid_faulting_flip;
> +	}
> +
> +	return 0;
> +}
> +__initcall(cpuid_faulting_init);
> +
>  /*
>   * cpu_init() initializes state that is per-CPU. Some data is already
>   * initialized (naturally) in the bootstrap process, such as the GDT
> diff -r 593d51c5f4ee xen/arch/x86/cpu/cpu.h
> --- a/xen/arch/x86/cpu/cpu.h	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/arch/x86/cpu/cpu.h	Fri Jul 01 19:04:41 2011 +0800
> @@ -30,4 +30,4 @@ extern void generic_identify(struct cpui
>  extern void generic_identify(struct cpuinfo_x86 * c);
>  
>  extern void early_intel_workaround(struct cpuinfo_x86 *c);
> -
> +extern void intel_cpuid_faulting_flip(unsigned int enable);
> diff -r 593d51c5f4ee xen/arch/x86/cpu/intel.c
> --- a/xen/arch/x86/cpu/intel.c	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/arch/x86/cpu/intel.c	Fri Jul 01 19:04:41 2011 +0800
> @@ -24,6 +24,39 @@
>   */
>  struct movsl_mask movsl_mask __read_mostly;
>  #endif
> +
> +static unsigned int intel_cpuid_faulting_enumerate(void)
> +{
> +	uint32_t hi, lo;
> +	struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()];
> +
> +	/*
> +	* Currently only one type of intel processor support cpuid faulting.
> +	* FIXME when needed in the future.
> +	*/
> +	if (!((c->x86 == 6) && (c->x86_model == 58) && (c->x86_mask == 2)))

Down to a particular stepping? That surely doesn't make sense for
anything but your own experimenting.

> +		return 0;
> +
> +	rdmsr(MSR_INTEL_PLATFORM_INFO, lo, hi);
> +	if (lo & (1 << 31))
> +		return 1;
> +
> +	return 0;
> +}
> +
> +void intel_cpuid_faulting_flip(unsigned int enable)
> +{
> +	uint32_t hi, lo;
> +
> +	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
> +	if (enable)
> +		lo |= 1;
> +	else
> +		lo &= ~1;
> +	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
> +
> +	per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
> +}
>  
>  /*
>   * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
> @@ -194,7 +227,10 @@ static void __devinit init_intel(struct 
>  		detect_ht(c);
>  	}
>  
> -	set_cpuidmask(c);
> +	if (intel_cpuid_faulting_enumerate())
> +		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
> +	else
> +		set_cpuidmask(c);
>  
>  	/* Work around errata */
>  	Intel_errata_workarounds(c);
> diff -r 593d51c5f4ee xen/arch/x86/domain.c
> --- a/xen/arch/x86/domain.c	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/arch/x86/domain.c	Fri Jul 01 19:04:41 2011 +0800
> @@ -63,6 +63,9 @@
>  
>  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>  DEFINE_PER_CPU(unsigned long, cr4);
> +DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
> +
> +void (*cpuid_faulting_flip)(unsigned int enable);

bool_t for both (and elsewhere in the patch)?

>  
>  static void default_idle(void);
>  static void default_dead_idle(void);
> @@ -1680,6 +1683,15 @@ void context_switch(struct vcpu *prev, s
>              load_LDT(next);
>              load_segments(next);
>          }
> +
> +	if ( cpuid_faulting_flip )
> +	{
> +		unsigned int enable;
> +
> +		enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);

Excluding Dom0 here is perhaps questionable (as it could allow hiding
features not supported by Xen from applications).

> +		if ( enable ^ this_cpu(cpuid_faulting_enabled) )
> +			cpuid_faulting_flip(enable);
> +	}
>      }
>  
>      context_saved(prev);
> diff -r 593d51c5f4ee xen/arch/x86/traps.c
> --- a/xen/arch/x86/traps.c	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/arch/x86/traps.c	Fri Jul 01 19:04:41 2011 +0800
> @@ -2113,11 +2113,13 @@ static int emulate_privileged_op(struct 
>  
>   twobyte_opcode:
>      /*
> -     * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
> -     * are executable only from guest kernel mode (virtual ring 0).
> +     * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
> +     * and CPUID (0xa2), are executable only from guest kernel mode 
> +     * (virtual ring 0).
>       */
>      opcode = insn_fetch(u8, code_base, eip, code_limit);
> -    if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
> +    if ( !guest_kernel_mode(v, regs) && 
> +        (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
>          goto fail;
>  
>      if ( lock && (opcode & ~3) != 0x20 )
> @@ -2550,6 +2552,10 @@ static int emulate_privileged_op(struct 
>              regs->edx = (uint32_t)(msr_content >> 32);
>              break;
>          }
> +        break;
> +
> +    case 0xa2: /* CPUID */
> +        pv_cpuid(regs);
>          break;
>  
>      default:
> diff -r 593d51c5f4ee xen/include/asm-x86/cpufeature.h
> --- a/xen/include/asm-x86/cpufeature.h	Sun Jun 12 22:27:01 2011 +0800
> +++ b/xen/include/asm-x86/cpufeature.h	Fri Jul 01 19:04:41 2011 +0800
> @@ -79,6 +79,7 @@
>  #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
>  #define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
>  #define X86_FEATURE_XTOPOLOGY    (3*32+13) /* cpu topology enum extensions */
> +#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
>  
>  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
>  #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
> @@ -175,6 +176,7 @@
>  #define cpu_has_page1gb		0
>  #define cpu_has_efer		(boot_cpu_data.x86_capability[1] & 0x20100800)
>  #define cpu_has_fsgsbase	0
> +#define cpu_has_cpuid_faulting  0

Why? I can't see anything in here that would require making this a
64-bit-only feature.

Jan


* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 15:48 ` Jan Beulich
@ 2011-07-01 16:07   ` Keir Fraser
  2011-07-02  3:04     ` Tian, Kevin
  2011-07-01 17:48   ` Liu, Jinsong
  1 sibling, 1 reply; 19+ messages in thread
From: Keir Fraser @ 2011-07-01 16:07 UTC (permalink / raw)
  To: Jan Beulich, Jinsong Liu; +Cc: Kevin Tian, xen-devel, Haitao Shan, Xin Li

On 01/07/2011 16:48, "Jan Beulich" <JBeulich@novell.com> wrote:

>> + if ( cpuid_faulting_flip )
>> + {
>> +  unsigned int enable;
>> +
>> +  enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
> 
> Excluding Dom0 here is perhaps questionable (as it could allow hiding
> features not supported by Xen from applications).

It's probably because of a need for raw cpuid in libxc/xc_cpuid_x86.c where
we need to detect things like long mode. We could do that another way, or
provide a new paravirtualised raw cpuid just for dom0.

Not a barrier to this patch going in imo.

I suppose HVM VCPUs are excluded because otherwise the new feature would be
unexpectedly enabled in non-root mode too, and make HVM guests crash? :-)

 -- Keir


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 15:48 ` Jan Beulich
  2011-07-01 16:07   ` Keir Fraser
@ 2011-07-01 17:48   ` Liu, Jinsong
  2011-07-01 18:02     ` Keir Fraser
  1 sibling, 1 reply; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-01 17:48 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Tian, Kevin, xen-devel, Shan, Haitao, Keir Fraser, Li, Xin

Jan Beulich wrote:
>>>> On 01.07.11 at 16:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> X86: cpuid faulting feature enable
>> 
>> Latest Intel processors add a cpuid faulting feature. This patch adds
>> support for cpuid faulting in Xen.
>> Like cpuid spoofing, cpuid faulting is mainly used to support live
>> migration. When CPL > 0, the cpuid instruction raises a #GP fault, and
>> the VMM then emulates execution of the cpuid instruction. Guest software
>> therefore sees the values chosen by the VMM.
>> 
>> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
>> 
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/common.c
>> --- a/xen/arch/x86/cpu/common.c	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/common.c	Fri Jul 01 19:04:41 2011 +0800
>> @@ -603,6 +603,18 @@ void __init early_cpu_init(void)
>>  #endif
>>  	early_cpu_detect();
>>  }
>> +
>> +static int __init cpuid_faulting_init(void)
>> +{
>> +	if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
>> +		cpu_has_cpuid_faulting ) {
>> +		cpuid_faulting_flip = intel_cpuid_faulting_flip;
>> +	}
>> +
>> +	return 0;
>> +}
>> +__initcall(cpuid_faulting_init);
>> +
>>  /*
>>   * cpu_init() initializes state that is per-CPU. Some data is
>> already 
>>   * initialized (naturally) in the bootstrap process, such as the GDT
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/cpu.h
>> --- a/xen/arch/x86/cpu/cpu.h	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/cpu.h	Fri Jul 01 19:04:41 2011 +0800
>> @@ -30,4 +30,4 @@ extern void generic_identify(struct cpui
>>  extern void generic_identify(struct cpuinfo_x86 * c);
>> 
>>  extern void early_intel_workaround(struct cpuinfo_x86 *c);
>> -
>> +extern void intel_cpuid_faulting_flip(unsigned int enable);
>> diff -r 593d51c5f4ee xen/arch/x86/cpu/intel.c
>> --- a/xen/arch/x86/cpu/intel.c	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/cpu/intel.c	Fri Jul 01 19:04:41 2011 +0800
>> @@ -24,6 +24,39 @@
>>   */
>>  struct movsl_mask movsl_mask __read_mostly;
>>  #endif
>> +
>> +static unsigned int intel_cpuid_faulting_enumerate(void)
>> +{
>> +	uint32_t hi, lo;
>> +	struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()];
>> +
>> +	/*
>> +	* Currently only one type of intel processor support cpuid faulting.
>> +	* FIXME when needed in the future.
>> +	*/
>> +	if (!((c->x86 == 6) && (c->x86_model == 58) && (c->x86_mask == 2)))
> 
> Down to a particular stepping? That surely doesn't make sense for
> anything but your own experimenting.

Yes, it's somewhat ugly.
Currently cpuid faulting is not an architecturally committed feature, and some other Intel processors (which do not have the cpuid faulting feature) also have the 0xce MSR.
Hence I use the current check to be safe. However, I marked it as FIXME so it can be updated in the future.

> 
>> +		return 0;
>> +
>> +	rdmsr(MSR_INTEL_PLATFORM_INFO, lo, hi);
>> +	if (lo & (1 << 31))
>> +		return 1;
>> +
>> +	return 0;
>> +}
>> +
>> +void intel_cpuid_faulting_flip(unsigned int enable)
>> +{
>> +	uint32_t hi, lo;
>> +
>> +	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
>> +	if (enable)
>> +		lo |= 1;
>> +	else
>> +		lo &= ~1;
>> +	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
>> +
>> +	per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
>> +}
>> 
>>  /*
>>   * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
>> @@ -194,7 +227,10 @@ static void __devinit init_intel(struct 
>>  		detect_ht(c);
>>  	}
>> 
>> -	set_cpuidmask(c);
>> +	if (intel_cpuid_faulting_enumerate())
>> +		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
>> +	else
>> +		set_cpuidmask(c);
>> 
>>  	/* Work around errata */
>>  	Intel_errata_workarounds(c);
>> diff -r 593d51c5f4ee xen/arch/x86/domain.c
>> --- a/xen/arch/x86/domain.c	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/domain.c	Fri Jul 01 19:04:41 2011 +0800
>> @@ -63,6 +63,9 @@
>> 
>>  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>>  DEFINE_PER_CPU(unsigned long, cr4);
>> +DEFINE_PER_CPU(unsigned int, cpuid_faulting_enabled);
>> +
>> +void (*cpuid_faulting_flip)(unsigned int enable);
> 
> bool_t for both (and elsewhere in the patch)?

OK, will change it to bool_t.


> 
>> 
>>  static void default_idle(void);
>>  static void default_dead_idle(void);
>> @@ -1680,6 +1683,15 @@ void context_switch(struct vcpu *prev, s
>>              load_LDT(next);
>>              load_segments(next);
>>          }
>> +
>> +	if ( cpuid_faulting_flip )
>> +	{
>> +		unsigned int enable;
>> +
>> +		enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
> 
> Excluding Dom0 here is perhaps questionable (as it could allow hiding
> features not supported by Xen from applications).
> 
>> +		if ( enable ^ this_cpu(cpuid_faulting_enabled) )
>> +			cpuid_faulting_flip(enable);
>> +	}
>>      }
>> 
>>      context_saved(prev);
>> diff -r 593d51c5f4ee xen/arch/x86/traps.c
>> --- a/xen/arch/x86/traps.c	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/arch/x86/traps.c	Fri Jul 01 19:04:41 2011 +0800
>> @@ -2113,11 +2113,13 @@ static int emulate_privileged_op(struct
>> 
>>   twobyte_opcode:
>>      /*
>> -     * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
>> -     * are executable only from guest kernel mode (virtual ring 0).
>> +     * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
>> +     * and CPUID (0xa2), are executable only from guest kernel mode
>> +     * (virtual ring 0).
>>       */
>>      opcode = insn_fetch(u8, code_base, eip, code_limit);
>> -    if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
>> +    if ( !guest_kernel_mode(v, regs) &&
>> +        (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
>>          goto fail;
>> 
>>      if ( lock && (opcode & ~3) != 0x20 )
>> @@ -2550,6 +2552,10 @@ static int emulate_privileged_op(struct
>>              regs->edx = (uint32_t)(msr_content >> 32);
>>              break;
>>          }
>> +        break;
>> +
>> +    case 0xa2: /* CPUID */
>> +        pv_cpuid(regs);
>>          break;
>> 
>>      default:
>> diff -r 593d51c5f4ee xen/include/asm-x86/cpufeature.h
>> --- a/xen/include/asm-x86/cpufeature.h	Sun Jun 12 22:27:01 2011 +0800
>> +++ b/xen/include/asm-x86/cpufeature.h	Fri Jul 01 19:04:41 2011 +0800
>> @@ -79,6 +79,7 @@
>>  #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
>>  #define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
>>  #define X86_FEATURE_XTOPOLOGY    (3*32+13) /* cpu topology enum extensions */
>> +#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
>>  
>>  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
>>  #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
>> @@ -175,6 +176,7 @@
>>  #define cpu_has_page1gb		0
>>  #define cpu_has_efer		(boot_cpu_data.x86_capability[1] & 0x20100800)
>>  #define cpu_has_fsgsbase	0
>> +#define cpu_has_cpuid_faulting  0
> 
> Why? I can't see anything in here that would require making this a
> 64-bit-only feature.

OK, will update the _i386_ case as well.

Thanks,
Jinsong


* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 17:48   ` Liu, Jinsong
@ 2011-07-01 18:02     ` Keir Fraser
  2011-07-01 18:12       ` Liu, Jinsong
  2011-07-01 18:14       ` Liu, Jinsong
  0 siblings, 2 replies; 19+ messages in thread
From: Keir Fraser @ 2011-07-01 18:02 UTC (permalink / raw)
  To: Liu, Jinsong, Jan Beulich; +Cc: Tian, Kevin, xen-devel, Shan, Haitao, Li, Xin

On 01/07/2011 18:48, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:

>> Down to a particular stepping? That surely doesn't make sense for
>> anything but your own experimenting.
> 
> Yes, it's somewhat ugly.
> Currently cpuid faulting is not an architecturally committed feature, and
> some other Intel processors (which do not have the cpuid faulting feature)
> also have the 0xce MSR.
> Hence I use the current check to be safe. However, I marked it as FIXME so
> it can be updated in the future.

But Intel's own supporting document states that bit 31 of the PLATFORM_INFO
MSR should be sufficient to identify the cpuid faulting feature. Do you
really need the stepping check as well? Could you just do a rdmsr_safe
read-and-check of PLATFORM_INFO_MSR[31] instead?

It would be okay for other Intel CPUs to have MSR 0xce, so long as they
don't set bit 31...

 -- Keir
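
(A minimal probe along those lines, assuming the Xen rdmsr_safe() variant that reads into a single 64-bit value, might look like this sketch:

	static unsigned int probe_intel_cpuid_faulting(void)
	{
		uint64_t x;

		/* rdmsr_safe() fails cleanly on CPUs lacking MSR 0xce; bit 31
		 * of PLATFORM_INFO advertises cpuid faulting. */
		return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) && (x & (1u << 31));
	}

with no family/model/stepping check needed at all.)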


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 18:02     ` Keir Fraser
@ 2011-07-01 18:12       ` Liu, Jinsong
  2011-07-01 18:14       ` Liu, Jinsong
  1 sibling, 0 replies; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-01 18:12 UTC (permalink / raw)
  To: Keir Fraser, Jan Beulich; +Cc: Tian, Kevin, xen-devel, Shan, Haitao, Li, Xin

Keir Fraser wrote:
> On 01/07/2011 18:48, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> 
>>> Down to a particular stepping? That surely doesn't make sense for
>>> anything but your own experimenting.
>> 
>> Yes, it's somewhat ugly.
>> Currently cpuid faulting is not an architecturally committed feature,
>> and some other Intel processors (which do not have the cpuid faulting
>> feature) also have the 0xce MSR. Hence I use the current check to be
>> safe. However, I marked it as FIXME so it can be updated in the future.
> 
> But Intel's own supporting document states that bit 31 of the
> PLATFORM_INFO MSR should be sufficient to identify the cpuid faulting
> feature. Do you really need the stepping check as well? Could you
> just do a rdmsr_safe read-and-check of PLATFORM_INFO_MSR[31] instead?
> 
> It would be okay for other Intel CPUs to have MSR 0xce, so long as
> they don't set bit 31...
> 
>  -- Keir

That's good. The documentation does formally state it.
We can move family/model/stepping now.

Thanks,
Jinsong


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 18:02     ` Keir Fraser
  2011-07-01 18:12       ` Liu, Jinsong
@ 2011-07-01 18:14       ` Liu, Jinsong
  1 sibling, 0 replies; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-01 18:14 UTC (permalink / raw)
  To: Liu, Jinsong, Keir Fraser, Jan Beulich
  Cc: Tian, Kevin, xen-devel, Shan, Haitao, Li, Xin

Liu, Jinsong wrote:
> Keir Fraser wrote:
>> On 01/07/2011 18:48, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> 
>>>> Down to a particular stepping? That surely doesn't make sense for
>>>> anything but your own experimenting.
>>> 
>>> Yes, it's somewhat ugly.
>>> Currently cpuid faulting is not an architecturally committed feature,
>>> and some other Intel processors (which do not have the cpuid faulting
>>> feature) also have the 0xce MSR. Hence I use the current check to be
>>> safe. However, I marked it as FIXME so it can be updated in the future.
>> 
>> But Intel's own supporting document states that bit 31 of the
>> PLATFORM_INFO MSR should be sufficient to identify the cpuid faulting
>> feature. Do you really need the stepping check as well? Could you
>> just do a rdmsr_safe read-and-check of PLATFORM_INFO_MSR[31] instead?
>> 
>> It would be okay for other Intel CPUs to have MSR 0xce, so long as
>> they don't set bit 31... 
>> 
>>  -- Keir
> 
> That's good. The documentation does formally state it.
> We can move family/model/stepping now.

sorry, I meant "remove", not "move". my poor english :(

> 
> Thanks,
> Jinsong


* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 14:32 [PATCH] X86: cpuid faulting feature enable Liu, Jinsong
  2011-07-01 15:48 ` Jan Beulich
@ 2011-07-01 21:31 ` Keir Fraser
  2011-07-02  3:15   ` Tian, Kevin
                     ` (2 more replies)
  1 sibling, 3 replies; 19+ messages in thread
From: Keir Fraser @ 2011-07-01 21:31 UTC (permalink / raw)
  To: Liu, Jinsong, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:

> X86: cpuid faulting feature enable
> 
> Latest Intel processors add a cpuid faulting feature. This patch adds
> support for cpuid faulting in Xen.
> Like cpuid spoofing, cpuid faulting is mainly used to support live migration.
> When CPL > 0, the cpuid instruction raises a #GP fault, and the VMM then
> emulates execution of the cpuid instruction. Guest software therefore sees
> the values chosen by the VMM.

I fixed this up quite a bit and applied as c/s 23653. Please take a look and
give it a test. In particular note the changes I made in init_intel(), to
make sure that APs are at least as featureful as the BSP w.r.t. cpuid
faulting.

 -- Keir


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 16:07   ` Keir Fraser
@ 2011-07-02  3:04     ` Tian, Kevin
  0 siblings, 0 replies; 19+ messages in thread
From: Tian, Kevin @ 2011-07-02  3:04 UTC (permalink / raw)
  To: Keir Fraser, Jan Beulich, Liu, Jinsong; +Cc: xen-devel, Shan, Haitao, Li, Xin

> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: Saturday, July 02, 2011 12:08 AM
> 
> On 01/07/2011 16:48, "Jan Beulich" <JBeulich@novell.com> wrote:
> 
> >> + if ( cpuid_faulting_flip )
> >> + {
> >> +  unsigned int enable;
> >> +
> >> +  enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
> >
> > Excluding Dom0 here is perhaps questionable (as it could allow hiding
> > features not supported by Xen from applications).
> 
> It's probably because of a need for raw cpuid in libxc/xc_cpuid_x86.c where
> we need to detect things like long mode. We could do that another way, or
> provide a new paravirtualised raw cpuid just for dom0.
> 
> Not a barrier to this patch going in imo.
> 
> I suppose HVM VCPUs are excluded because otherwise the new feature would be
> unexpectedly enabled in non-root mode too, and make HVM guests crash? :-)
> 

yes, unless we configure #GP to cause a VM exit as an alternative. :-)

Thanks
Kevin
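
(Such an alternative would amount to intercepting #GP via the VMX exception bitmap; a rough sketch, assuming the usual Xen VMCS accessors:

	/* Hypothetical: make guest #GP cause a VM exit, so CPUID faults from
	 * HVM ring 3 could be emulated by Xen rather than crashing the
	 * guest. Not what this patch does. */
	unsigned long bitmap = __vmread(EXCEPTION_BITMAP);
	__vmwrite(EXCEPTION_BITMAP, bitmap | (1U << TRAP_gp_fault));

at the cost of a VM exit on every guest #GP.)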


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 21:31 ` Keir Fraser
@ 2011-07-02  3:15   ` Tian, Kevin
  2011-07-02  7:49     ` Keir Fraser
  2011-07-03  9:40   ` Liu, Jinsong
  2011-07-04  3:19   ` Liu, Jinsong
  2 siblings, 1 reply; 19+ messages in thread
From: Tian, Kevin @ 2011-07-02  3:15 UTC (permalink / raw)
  To: Keir Fraser, Liu, Jinsong, xen-devel; +Cc: Shan, Haitao, Li, Xin

> From: Keir Fraser [mailto:keir.xen@gmail.com] On Behalf Of Keir Fraser
> Sent: Saturday, July 02, 2011 5:31 AM
> 
> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> 
> > X86: cpuid faulting feature enable
> >
> > Latest Intel processors add a cpuid faulting feature. This patch adds
> > support for cpuid faulting in Xen.
> > Like cpuid spoofing, cpuid faulting is mainly used to support live migration.
> > When CPL > 0, the cpuid instruction raises a #GP fault, and the VMM then
> > emulates execution of the cpuid instruction. Guest software therefore sees
> > the values chosen by the VMM.
> 
> I fixed this up quite a bit and applied as c/s 23653. Please take a look and
> give it a test. In particular note the changes I made in init_intel(), to
> make sure that APs are at least as featureful as the BSP w.r.t. cpuid
> faulting.
> 

	2.42	+	} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
    2.43 +		BUG_ON(!probe_intel_cpuid_faulting());
    2.44 +		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
    2.45 +	}
    2.46 +
    2.47 +	if (!cpu_has_cpuid_faulting)
    2.48 +		set_cpuidmask(c);
    2.49 +
    2.50 +	BUG_ON(cpu_has(c, X86_FEATURE_CPUID_FAULTING) !=
    2.51 +	       boot_cpu_has(X86_FEATURE_CPUID_FAULTING));

the latter BUG_ON is redundant: if an AP is inconsistent with the BSP, the
earlier BUG_ON already triggers.
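
With the redundant check dropped, the init_intel() logic reduces to roughly the following (a sketch of the fixed-up changeset, not a verbatim quote; probe_intel_cpuid_faulting() is the helper from c/s 23653):

	if (c == &boot_cpu_data && probe_intel_cpuid_faulting())
		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
	else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
		/* APs must be at least as featureful as the BSP. */
		BUG_ON(!probe_intel_cpuid_faulting());
		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
	}

	if (!cpu_has_cpuid_faulting)
		set_cpuidmask(c);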

Thanks
Kevin


* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-02  3:15   ` Tian, Kevin
@ 2011-07-02  7:49     ` Keir Fraser
  2011-07-04  1:06       ` Shan, Haitao
  0 siblings, 1 reply; 19+ messages in thread
From: Keir Fraser @ 2011-07-02  7:49 UTC (permalink / raw)
  To: Tian, Kevin, Liu, Jinsong, xen-devel; +Cc: Shan, Haitao, Li, Xin

On 02/07/2011 04:15, "Tian, Kevin" <kevin.tian@intel.com> wrote:

>> From: Keir Fraser [mailto:keir.xen@gmail.com] On Behalf Of Keir Fraser
>> Sent: Saturday, July 02, 2011 5:31 AM
>> 
>> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> 
>>> X86: cpuid faulting feature enable
>>> 
>>> Latest Intel processors add a cpuid faulting feature. This patch adds
>>> support for cpuid faulting in Xen.
>>> Like cpuid spoofing, cpuid faulting is mainly used to support live migration.
>>> When CPL > 0, the cpuid instruction raises a #GP fault, and the VMM then
>>> emulates execution of the cpuid instruction. Guest software therefore sees
>>> the values chosen by the VMM.
>> 
>> I fixed this up quite a bit and applied as c/s 23653. Please take a look and
>> give it a test. In particular note the changes I made in init_intel(), to
>> make sure that APs are at least as featureful as the BSP w.r.t. cpuid
>> faulting.
>> 
> 
> 2.42 + } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
>     2.43 +  BUG_ON(!probe_intel_cpuid_faulting());
>     2.44 +  set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
>     2.45 + }
>     2.46 +
>     2.47 + if (!cpu_has_cpuid_faulting)
>     2.48 +  set_cpuidmask(c);
>     2.49 +
>     2.50 + BUG_ON(cpu_has(c, X86_FEATURE_CPUID_FAULTING) !=
>     2.51 +        boot_cpu_has(X86_FEATURE_CPUID_FAULTING));
> 
> the latter BUG_ON is useless since if AP is inconsistent with BSP we already
> get bug triggered in earlier BUG_ON.

Oops, I forgot to remove the redundant second BUG_ON. I'll do that now.
Thanks!

 -- Keir

> Thanks
> Kevin


* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 21:31 ` Keir Fraser
  2011-07-02  3:15   ` Tian, Kevin
@ 2011-07-03  9:40   ` Liu, Jinsong
  2011-07-04  3:19   ` Liu, Jinsong
  2 siblings, 0 replies; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-03  9:40 UTC (permalink / raw)
  To: Keir Fraser, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

Keir Fraser wrote:
> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> 
>> X86: cpuid faulting feature enable
>> 
>> Latest Intel processor add cpuid faulting feature. This patch is
>> used to support cpuid faulting in Xen.
>> Like cpuid spoofing, cpuid faulting mainly used to support live
>> migration. When cpl>0, cpuid instruction will produce GP, vmm then
>> emulate execution of the cpuid instruction. Hence will appear to
>> guest software the value chosen by the vmm.
> 
> I fixed this up quite a bit and applied as c/s 23653. Please take a
> look and give it a test. In particular note the changes I made in
> init_intel(), to make sure that APs are at least as featureful as the
> BSP w.r.t. cpuid faulting.
> 
>  -- Keir

OK, I will test it on Monday.
I think it's good to check that the APs match the BSP in init_intel().

Thanks,
Jinsong


> 
>> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
>> 
>> [...]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-02  7:49     ` Keir Fraser
@ 2011-07-04  1:06       ` Shan, Haitao
  2011-07-04  6:58         ` Keir Fraser
  0 siblings, 1 reply; 19+ messages in thread
From: Shan, Haitao @ 2011-07-04  1:06 UTC (permalink / raw)
  To: Keir Fraser, Tian, Kevin, Liu, Jinsong, xen-devel; +Cc: Li, Xin

diff -r 177ddb2f4ebf -r 71b58748cfee xen/arch/x86/cpu/cpu.h
--- a/xen/arch/x86/cpu/cpu.h    Fri Jul 01 20:48:00 2011 +0100
+++ b/xen/arch/x86/cpu/cpu.h    Fri Jul 01 22:28:53 2011 +0100
@@ -30,4 +30,4 @@ extern void display_cacheinfo(struct cpu
 extern void generic_identify(struct cpuinfo_x86 * c);

 extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
+extern void intel_cpuid_faulting_flip(unsigned int enable);

Should we remove this declaration altogether? It seems to be unused now.

Shan Haitao

-----Original Message-----
From: Keir Fraser [mailto:keir.xen@gmail.com] 
Sent: Saturday, July 02, 2011 3:50 PM
To: Tian, Kevin; Liu, Jinsong; xen-devel@lists.xensource.com
Cc: Shan, Haitao; Li, Xin
Subject: Re: [PATCH] X86: cpuid faulting feature enable

On 02/07/2011 04:15, "Tian, Kevin" <kevin.tian@intel.com> wrote:

>> From: Keir Fraser [mailto:keir.xen@gmail.com] On Behalf Of Keir 
>> Fraser
>> Sent: Saturday, July 02, 2011 5:31 AM
>> 
>> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> 
>>> X86: cpuid faulting feature enable
>>> 
>>> Latest Intel processor add cpuid faulting feature. This patch is 
>>> used to support cpuid faulting in Xen.
>>> Like cpuid spoofing, cpuid faulting mainly used to support live migration.
>>> When cpl>0, cpuid instruction will produce GP, vmm then emulate execution
>>> of the cpuid instruction. Hence will appear to guest software the value
>>> chosen by the vmm.
>> 
>> I fixed this up quite a bit and applied as c/s 23653. Please take a 
>> look and give it a test. In particular note the changes I made in 
>> init_intel(), to make sure that APs are at least as featureful as the
>> BSP w.r.t. cpuid faulting.
>> 
> 
> 2.42 + } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
>     2.43 +  BUG_ON(!probe_intel_cpuid_faulting());
>     2.44 +  set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
>     2.45 + }
>     2.46 +
>     2.47 + if (!cpu_has_cpuid_faulting)
>     2.48 +  set_cpuidmask(c);
>     2.49 +
>     2.50 + BUG_ON(cpu_has(c, X86_FEATURE_CPUID_FAULTING) !=
>     2.51 +        boot_cpu_has(X86_FEATURE_CPUID_FAULTING));
> 
> the latter BUG_ON is useless: if the AP is inconsistent with the BSP, the
> earlier BUG_ON already triggers.

Oops, I forgot to remove the redundant second BUG_ON. I'll do that now.
Thanks!

 -- Keir

> Thanks
> Kevin

^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 21:31 ` Keir Fraser
  2011-07-02  3:15   ` Tian, Kevin
  2011-07-03  9:40   ` Liu, Jinsong
@ 2011-07-04  3:19   ` Liu, Jinsong
  2011-07-04  6:59     ` Keir Fraser
  2 siblings, 1 reply; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-04  3:19 UTC (permalink / raw)
  To: Keir Fraser, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

Keir Fraser wrote:
> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> 
>> X86: cpuid faulting feature enable
>> 
>> Latest Intel processor add cpuid faulting feature. This patch is
>> used to support cpuid faulting in Xen.
>> Like cpuid spoofing, cpuid faulting mainly used to support live
>> migration. When cpl>0, cpuid instruction will produce GP, vmm then
>> emulate execution of the cpuid instruction. Hence will appear to
>> guest software the value chosen by the vmm.
> 
> I fixed this up quite a bit and applied as c/s 23653. Please take a
> look and give it a test. In particular note the changes I made in
> init_intel(), to make sure that APs are at least as featureful as the
> BSP w.r.t. cpuid faulting.
> 
>  -- Keir

I have a concern about 'set_cpuid_faulting': it is used in common code ('context_switch') but defined in the arch-specific cpu/intel.c. That could be trouble when extending this to other vendors such as AMD.
Another minor point: in probe_intel_cpuid_faulting, it would be better to read the MSR into a 64-bit value: uint64_t x;
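
A minimal sketch of that suggestion, assuming rdmsr_safe() stores through
a uint64_t as the applied changeset does:

	static unsigned int probe_intel_cpuid_faulting(void)
	{
		uint64_t x;

		/* MSR_INTEL_PLATFORM_INFO bit 31 enumerates cpuid faulting. */
		return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) && (x & (1u<<31));
	}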

Thanks,
Jinsong

> 
>> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
>> 
>> [...]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-04  1:06       ` Shan, Haitao
@ 2011-07-04  6:58         ` Keir Fraser
  0 siblings, 0 replies; 19+ messages in thread
From: Keir Fraser @ 2011-07-04  6:58 UTC (permalink / raw)
  To: Shan, Haitao, Tian, Kevin, Liu, Jinsong, xen-devel; +Cc: Li, Xin

On 04/07/2011 02:06, "Shan, Haitao" <haitao.shan@intel.com> wrote:

> diff -r 177ddb2f4ebf -r 71b58748cfee xen/arch/x86/cpu/cpu.h
> --- a/xen/arch/x86/cpu/cpu.h    Fri Jul 01 20:48:00 2011 +0100
> +++ b/xen/arch/x86/cpu/cpu.h    Fri Jul 01 22:28:53 2011 +0100
> @@ -30,4 +30,4 @@ extern void display_cacheinfo(struct cpu
>  extern void generic_identify(struct cpuinfo_x86 * c);
> 
>  extern void early_intel_workaround(struct cpuinfo_x86 *c);
> -
> +extern void intel_cpuid_faulting_flip(unsigned int enable);
> 
> Should we remove this declaration altogether? It seems to be unused now.

Done. Thanks.

> Shan Haitao
> 
> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: Saturday, July 02, 2011 3:50 PM
> To: Tian, Kevin; Liu, Jinsong; xen-devel@lists.xensource.com
> Cc: Shan, Haitao; Li, Xin
> Subject: Re: [PATCH] X86: cpuid faulting feature enable
> 
> On 02/07/2011 04:15, "Tian, Kevin" <kevin.tian@intel.com> wrote:
> 
>>> From: Keir Fraser [mailto:keir.xen@gmail.com] On Behalf Of Keir
>>> Fraser
>>> Sent: Saturday, July 02, 2011 5:31 AM
>>> 
>>> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>>> 
>>>> X86: cpuid faulting feature enable
>>>> 
>>>> Latest Intel processor add cpuid faulting feature. This patch is
>>>> used to support cpuid faulting in Xen.
>>>> Like cpuid spoofing, cpuid faulting mainly used to support live migration.
>>>> When cpl>0, cpuid instruction will produce GP, vmm then emulate execution
>>>> of the cpuid instruction. Hence will appear to guest software the value
>>>> chosen by the vmm.
>>> 
>>> I fixed this up quite a bit and applied as c/s 23653. Please take a
>>> look and give it a test. In particular note the changes I made in
>>> init_intel(), to make sure that APs are at least as featureful as the
>>> BSP w.r.t. cpuid faulting.
>>> 
>> 
>> 2.42 + } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
>>     2.43 +  BUG_ON(!probe_intel_cpuid_faulting());
>>     2.44 +  set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
>>     2.45 + }
>>     2.46 +
>>     2.47 + if (!cpu_has_cpuid_faulting)
>>     2.48 +  set_cpuidmask(c);
>>     2.49 +
>>     2.50 + BUG_ON(cpu_has(c, X86_FEATURE_CPUID_FAULTING) !=
>>     2.51 +        boot_cpu_has(X86_FEATURE_CPUID_FAULTING));
>> 
>> the latter BUG_ON is useless: if the AP is inconsistent with the BSP, the
>> earlier BUG_ON already triggers.
> 
> Oops, I forgot to remove the redundant second BUG_ON. I'll do that now.
> Thanks!
> 
>  -- Keir
> 
>> Thanks
>> Kevin
> 
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-04  3:19   ` Liu, Jinsong
@ 2011-07-04  6:59     ` Keir Fraser
  2011-07-04  8:10       ` Liu, Jinsong
  0 siblings, 1 reply; 19+ messages in thread
From: Keir Fraser @ 2011-07-04  6:59 UTC (permalink / raw)
  To: Liu, Jinsong, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

On 04/07/2011 04:19, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:

> Keir Fraser wrote:
>> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> 
>>> X86: cpuid faulting feature enable
>>> 
>>> Latest Intel processor add cpuid faulting feature. This patch is
>>> used to support cpuid faulting in Xen.
>>> Like cpuid spoofing, cpuid faulting mainly used to support live
>>> migration. When cpl>0, cpuid instruction will produce GP, vmm then
>>> emulate execution of the cpuid instruction. Hence will appear to
>>> guest software the value chosen by the vmm.
>> 
>> I fixed this up quite a bit and applied as c/s 23653. Please take a
>> look and give it a test. In particular note the changes I made in
> init_intel(), to make sure that APs are at least as featureful as the
>> BSP w.r.t. cpuid faulting.
>> 
>>  -- Keir
> 
> I have a concern about 'set_cpuid_faulting': it is used in common code
> ('context_switch') but defined in the arch-specific cpu/intel.c. That could
> be trouble when extending this to other vendors such as AMD.

It can easily be turned into a function pointer again, or whatever, if need
be.
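
A minimal sketch of that indirection, reusing the hook pattern from the
original patch; the intel_set_cpuid_faulting and set_cpuid_faulting_init
names here are hypothetical:

	/* Common code, e.g. xen/arch/x86/domain.c: */
	void (*set_cpuid_faulting)(bool_t enable);

	/* Vendor code, e.g. xen/arch/x86/cpu/intel.c: */
	static void intel_set_cpuid_faulting(bool_t enable)
	{
		uint32_t hi, lo;

		rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
		lo = enable ? (lo | 1) : (lo & ~1);
		wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
		per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
	}

	/* Installed once at boot when the feature is present: */
	static int __init set_cpuid_faulting_init(void)
	{
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    cpu_has_cpuid_faulting)
			set_cpuid_faulting = intel_set_cpuid_faulting;
		return 0;
	}
	__initcall(set_cpuid_faulting_init);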

> Another minor point: in probe_intel_cpuid_faulting, it would be better to
> read the MSR into a 64-bit value: uint64_t x;

Yes, not really necessary, but it's cleaner to call rdmsr_safe with a
uint64_t so I've made that change.

 Thanks,
 Keir

> Thanks,
> Jinsong
> 
>> 
>>> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
>>> 
>>> [...]
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH] X86: cpuid faulting feature enable
  2011-07-04  6:59     ` Keir Fraser
@ 2011-07-04  8:10       ` Liu, Jinsong
  0 siblings, 0 replies; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-04  8:10 UTC (permalink / raw)
  To: Keir Fraser, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

Keir Fraser wrote:
> On 04/07/2011 04:19, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> 
>> Keir Fraser wrote:
>>> On 01/07/2011 15:32, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>>> 
>>>> X86: cpuid faulting feature enable
>>>> 
>>>> Latest Intel processor add cpuid faulting feature. This patch is
>>>> used to support cpuid faulting in Xen.
>>>> Like cpuid spoofing, cpuid faulting mainly used to support live
>>>> migration. When cpl>0, cpuid instruction will produce GP, vmm then
>>>> emulate execution of the cpuid instruction. Hence will appear to
>>>> guest software the value chosen by the vmm.
>>> 
>>> I fixed this up quite a bit and applied as c/s 23653. Please take a
>>> look and give it a test. In particular note the changes I made in
>>> init_intel(), to make sure that APs are at least as featureful as
>>> the BSP w.r.t. cpuid faulting. 
>>> 
>>>  -- Keir

Tested against the latest c/s 23655; it's OK.

Thanks,
Jinsong

>> 
>> I have a concern about 'set_cpuid_faulting': it is used in common code
>> ('context_switch') but defined in the arch-specific cpu/intel.c. That
>> could be trouble when extending this to other vendors such as AMD.
> 
> It can easily be turned into a function pointer again, or whatever,
> if need be.
> 
>> Another minor point: in probe_intel_cpuid_faulting, it would be better
>> to read the MSR into a 64-bit value: uint64_t x;
> 
> Yes, not really necessary, but it's cleaner to call rdmsr_safe with a
> uint64_t so I've made that change.
> 
>  Thanks,
>  Keir
> 
>> Thanks,
>> Jinsong
>> 
>>> 
>>>> Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
>>>> 
>>>> [...]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] X86: cpuid faulting feature enable
  2011-07-01 19:41 Liu, Jinsong
@ 2011-07-01 21:32 ` Keir Fraser
  0 siblings, 0 replies; 19+ messages in thread
From: Keir Fraser @ 2011-07-01 21:32 UTC (permalink / raw)
  To: Liu, Jinsong, Jan Beulich, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

On 01/07/2011 20:41, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:

> Updated patch according to Keir and Jan's comments.

As I just this moment posted, I already applied a fixed-up version, which
you should take a look at.

 Thanks,
 Keir

> Thanks,
> Jinsong
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH] X86: cpuid faulting feature enable
@ 2011-07-01 19:41 Liu, Jinsong
  2011-07-01 21:32 ` Keir Fraser
  0 siblings, 1 reply; 19+ messages in thread
From: Liu, Jinsong @ 2011-07-01 19:41 UTC (permalink / raw)
  To: Keir Fraser, Jan Beulich, xen-devel; +Cc: Tian, Kevin, Shan, Haitao, Li, Xin

[-- Attachment #1: Type: text/plain, Size: 71 bytes --]

Updated patch according to Keir and Jan's comments.

Thanks,
Jinsong

[-- Attachment #2: cpuid_faulting.patch --]
[-- Type: application/octet-stream, Size: 7472 bytes --]

X86: cpuid faulting feature enable

The latest Intel processors add a cpuid faulting feature. This patch supports cpuid faulting in Xen.
Like cpuid spoofing, cpuid faulting is mainly used to support live migration. When cpuid faulting is enabled, a cpuid instruction executed at cpl>0 produces a GP fault, and the vmm then emulates execution of the cpuid instruction. Guest software therefore sees the values chosen by the vmm.

Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>

diff -r 593d51c5f4ee xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/common.c	Sat Jul 02 03:13:41 2011 +0800
@@ -603,6 +603,18 @@ void __init early_cpu_init(void)
 #endif
 	early_cpu_detect();
 }
+
+static int __init cpuid_faulting_init(void)
+{
+	if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 
+		cpu_has_cpuid_faulting ) {
+		cpuid_faulting_flip = intel_cpuid_faulting_flip;
+	}
+
+	return 0;
+}
+__initcall(cpuid_faulting_init);
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
diff -r 593d51c5f4ee xen/arch/x86/cpu/cpu.h
--- a/xen/arch/x86/cpu/cpu.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/cpu.h	Sat Jul 02 03:13:41 2011 +0800
@@ -30,4 +30,4 @@ extern void generic_identify(struct cpui
 extern void generic_identify(struct cpuinfo_x86 * c);
 
 extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
+extern void intel_cpuid_faulting_flip(bool_t enable);
diff -r 593d51c5f4ee xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/cpu/intel.c	Sat Jul 02 03:13:41 2011 +0800
@@ -24,6 +24,31 @@
  */
 struct movsl_mask movsl_mask __read_mostly;
 #endif
+
+static unsigned int intel_cpuid_faulting_enumerate(void)
+{
+	uint64_t msr_content;
+
+	if (!rdmsr_safe(MSR_INTEL_PLATFORM_INFO, msr_content) &&
+	   (msr_content & (1u<<31)))
+		return 1;
+
+	return 0;
+}
+
+void intel_cpuid_faulting_flip(bool_t enable)
+{
+	uint32_t hi, lo;
+
+	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+	if (enable)
+		lo |= 1;
+	else
+		lo &= ~1;
+	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+
+	per_cpu(cpuid_faulting_enabled, smp_processor_id()) = enable;
+}
 
 /*
  * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
@@ -194,7 +219,10 @@ static void __devinit init_intel(struct 
 		detect_ht(c);
 	}
 
-	set_cpuidmask(c);
+	if (intel_cpuid_faulting_enumerate())
+		set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+	else
+		set_cpuidmask(c);
 
 	/* Work around errata */
 	Intel_errata_workarounds(c);
diff -r 593d51c5f4ee xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/domain.c	Sat Jul 02 03:13:41 2011 +0800
@@ -63,6 +63,9 @@
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
+DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+
+void (*cpuid_faulting_flip)(bool_t enable);
 
 static void default_idle(void);
 static void default_dead_idle(void);
@@ -1680,6 +1683,15 @@ void context_switch(struct vcpu *prev, s
             load_LDT(next);
             load_segments(next);
         }
+
+	if ( cpuid_faulting_flip )
+	{
+		bool_t enable;
+
+		enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);
+		if ( enable ^ this_cpu(cpuid_faulting_enabled) )
+			cpuid_faulting_flip(enable);
+	}
     }
 
     context_saved(prev);
diff -r 593d51c5f4ee xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/arch/x86/traps.c	Sat Jul 02 03:13:41 2011 +0800
@@ -2113,11 +2113,13 @@ static int emulate_privileged_op(struct 
 
  twobyte_opcode:
     /*
-     * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
-     * are executable only from guest kernel mode (virtual ring 0).
+     * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
+     * and CPUID (0xa2), are executable only from guest kernel mode 
+     * (virtual ring 0).
      */
     opcode = insn_fetch(u8, code_base, eip, code_limit);
-    if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
+    if ( !guest_kernel_mode(v, regs) && 
+        (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
         goto fail;
 
     if ( lock && (opcode & ~3) != 0x20 )
@@ -2550,6 +2552,10 @@ static int emulate_privileged_op(struct 
             regs->edx = (uint32_t)(msr_content >> 32);
             break;
         }
+        break;
+
+    case 0xa2: /* CPUID */
+        pv_cpuid(regs);
         break;
 
     default:
diff -r 593d51c5f4ee xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/cpufeature.h	Sat Jul 02 03:13:41 2011 +0800
@@ -79,6 +79,7 @@
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 #define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
 #define X86_FEATURE_XTOPOLOGY    (3*32+13) /* cpu topology enum extensions */
+#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -175,6 +176,7 @@
 #define cpu_has_page1gb		0
 #define cpu_has_efer		(boot_cpu_data.x86_capability[1] & 0x20100800)
 #define cpu_has_fsgsbase	0
+#define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
 #else /* __x86_64__ */
 #define cpu_has_vme		0
 #define cpu_has_de		1
@@ -201,6 +203,7 @@
 #define cpu_has_page1gb		boot_cpu_has(X86_FEATURE_PAGE1GB)
 #define cpu_has_efer		1
 #define cpu_has_fsgsbase	boot_cpu_has(X86_FEATURE_FSGSBASE)
+#define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
 #endif
 
 #define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
diff -r 593d51c5f4ee xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/msr-index.h	Sat Jul 02 03:13:41 2011 +0800
@@ -155,11 +155,6 @@
 #define MSR_P6_PERFCTR1			0x000000c2
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
-
-/* MSRs for Intel cpuid feature mask */
-#define MSR_INTEL_CPUID_FEATURE_MASK	0x00000478
-#define MSR_INTEL_CPUID1_FEATURE_MASK	0x00000130
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
 
 /* MSRs & bits used for VMX enabling */
 #define MSR_IA32_VMX_BASIC                      0x480
@@ -492,6 +487,15 @@
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
 
+/* Intel cpuid spoofing MSRs */
+#define MSR_INTEL_CPUID_FEATURE_MASK	0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK	0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
+
+/* Intel cpuid faulting MSRs */
+#define MSR_INTEL_PLATFORM_INFO		0x000000ce
+#define MSR_INTEL_MISC_FEATURES_ENABLES	0x00000140
+
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
diff -r 593d51c5f4ee xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Sun Jun 12 22:27:01 2011 +0800
+++ b/xen/include/asm-x86/processor.h	Sat Jul 02 03:13:41 2011 +0800
@@ -192,6 +192,10 @@ extern struct cpuinfo_x86 cpu_data[];
 #define cpu_data (&boot_cpu_data)
 #define current_cpu_data boot_cpu_data
 #endif
+
+extern DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+
+extern void (*cpuid_faulting_flip)(bool_t enable);
 
 extern u64 host_pat;
 extern int phys_proc_id[NR_CPUS];
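
To illustrate the lazy toggle in the context_switch() hunk above, here is
the same logic restated with comments; it adds nothing beyond what the
patch does. Faulting is wanted exactly for PV guests other than dom0, and
the MSR is only rewritten when the cached per-CPU state disagrees:

	/* Desired state for the incoming vcpu: PV guest, not dom0. */
	bool_t enable = !is_hvm_vcpu(next) && (next->domain->domain_id != 0);

	/* Write MSR_INTEL_MISC_FEATURES_ENABLES only on a state change. */
	if ( enable ^ this_cpu(cpuid_faulting_enabled) )
		cpuid_faulting_flip(enable);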

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2011-07-04  8:10 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-07-01 14:32 [PATCH] X86: cpuid faulting feature enable Liu, Jinsong
2011-07-01 15:48 ` Jan Beulich
2011-07-01 16:07   ` Keir Fraser
2011-07-02  3:04     ` Tian, Kevin
2011-07-01 17:48   ` Liu, Jinsong
2011-07-01 18:02     ` Keir Fraser
2011-07-01 18:12       ` Liu, Jinsong
2011-07-01 18:14       ` Liu, Jinsong
2011-07-01 21:31 ` Keir Fraser
2011-07-02  3:15   ` Tian, Kevin
2011-07-02  7:49     ` Keir Fraser
2011-07-04  1:06       ` Shan, Haitao
2011-07-04  6:58         ` Keir Fraser
2011-07-03  9:40   ` Liu, Jinsong
2011-07-04  3:19   ` Liu, Jinsong
2011-07-04  6:59     ` Keir Fraser
2011-07-04  8:10       ` Liu, Jinsong
2011-07-01 19:41 Liu, Jinsong
2011-07-01 21:32 ` Keir Fraser

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.