From: Shuai Ruan <shuai.ruan@linux.intel.com>
Subject: [V11 2/3] x86/xsaves: enable xsaves/xrstors for hvm guest
Date: Fri, 20 Nov 2015 09:18:01 +0800
Message-ID: <1447982282-7437-3-git-send-email-shuai.ruan@linux.intel.com>
References: <1447982282-7437-1-git-send-email-shuai.ruan@linux.intel.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <1447982282-7437-1-git-send-email-shuai.ruan@linux.intel.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, wei.liu2@citrix.com, Ian.Campbell@citrix.com,
    stefano.stabellini@eu.citrix.com, jun.nakajima@intel.com,
    andrew.cooper3@citrix.com, ian.jackson@eu.citrix.com, jbeulich@suse.com,
    keir@xen.org
List-Id: xen-devel@lists.xenproject.org

This patch enables XSAVES/XRSTORS for HVM guests. It includes:
1. handling XSAVES VMCS initialization and VM exits;
2. adding logic to read/write the XSS MSR, with IA32_XSS MSR
   save/restore support.

Signed-off-by: Shuai Ruan <shuai.ruan@linux.intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/hvm/hvm.c             | 27 +++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmcs.c        |  5 ++++-
 xen/arch/x86/hvm/vmx/vmx.c         | 35 ++++++++++++++++++++++++++++++++++-
 xen/arch/x86/xstate.c              |  2 +-
 xen/include/asm-x86/hvm/vmx/vmcs.h |  4 ++++
 xen/include/asm-x86/hvm/vmx/vmx.h  |  2 ++
 xen/include/asm-x86/xstate.h       |  1 +
 7 files changed, 73 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 20148f5..108e3e4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4606,6 +4606,20 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                     *ebx = _eax + _ebx;
             }
         }
+        if ( count == 1 )
+        {
+            if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
+            {
+                *ebx = XSTATE_AREA_MIN_SIZE;
+                if ( v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss )
+                    for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
+                        if ( (v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss) &
+                             (1ULL << sub_leaf) )
+                            *ebx += xstate_sizes[sub_leaf];
+            }
+            else
+                *ebx = *ecx = *edx = 0;
+        }
         break;
 
     case 0x80000001:
@@ -4705,6 +4719,12 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
+    case MSR_IA32_XSS:
+        if ( !cpu_has_xsaves )
+            goto gp_fault;
+        *msr_content = v->arch.hvm_vcpu.msr_xss;
+        break;
+
     case MSR_IA32_TSC:
         *msr_content = _hvm_rdtsc_intercept();
         break;
@@ -4837,6 +4857,13 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
            return X86EMUL_EXCEPTION;
        break;
 
+    case MSR_IA32_XSS:
+        /* No XSS features currently supported for guests. */
+        if ( !cpu_has_xsaves || msr_content != 0 )
+            goto gp_fault;
+        v->arch.hvm_vcpu.msr_xss = msr_content;
+        break;
+
     case MSR_IA32_TSC:
         hvm_set_guest_tsc(v, msr_content);
         break;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 53207e6..cf9c14d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -240,7 +240,8 @@ static int vmx_init_vmcs_config(void)
                SECONDARY_EXEC_PAUSE_LOOP_EXITING |
                SECONDARY_EXEC_ENABLE_INVPCID |
                SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
-               SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);
+               SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS |
+               SECONDARY_EXEC_XSAVES);
         rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
         if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL )
             opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING;
@@ -1249,6 +1250,8 @@ static int construct_vmcs(struct vcpu *v)
         __vmwrite(HOST_PAT, host_pat);
         __vmwrite(GUEST_PAT, guest_pat);
     }
+    if ( cpu_has_vmx_xsaves )
+        __vmwrite(XSS_EXIT_BITMAP, 0);
 
     vmx_vmcs_exit(v);
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index eb6248e..f4a6cca 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -625,7 +625,7 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 
 static unsigned int __init vmx_init_msr(void)
 {
-    return !!cpu_has_mpx;
+    return !!cpu_has_mpx + !!cpu_has_xsaves;
 }
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
@@ -640,6 +640,13 @@ static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
     }
 
     vmx_vmcs_exit(v);
+
+    if ( cpu_has_xsaves )
+    {
+        ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
+    }
 }
 
 static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
@@ -659,6 +666,12 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else
                 err = -ENXIO;
             break;
+        case MSR_IA32_XSS:
+            if ( cpu_has_xsaves )
+                v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
+            else
+                err = -ENXIO;
+            break;
         default:
             continue;
         }
@@ -2809,6 +2822,18 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
     }
 }
 
+static void vmx_handle_xsaves(void)
+{
+    gdprintk(XENLOG_ERR, "xsaves should not cause vmexit\n");
+    domain_crash(current->domain);
+}
+
+static void vmx_handle_xrstors(void)
+{
+    gdprintk(XENLOG_ERR, "xrstors should not cause vmexit\n");
+    domain_crash(current->domain);
+}
+
 static int vmx_handle_apic_write(void)
 {
     unsigned long exit_qualification;
@@ -3386,6 +3411,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_vcpu_flush_pml_buffer(v);
         break;
 
+    case EXIT_REASON_XSAVES:
+        vmx_handle_xsaves();
+        break;
+
+    case EXIT_REASON_XRSTORS:
+        vmx_handle_xrstors();
+        break;
+
     case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
     case EXIT_REASON_ACCESS_LDTR_OR_TR:
     case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index f3ae285..195544c 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -25,7 +25,7 @@ static u32 __read_mostly xsave_cntxt_size;
 u64 __read_mostly xfeature_mask;
 
 static unsigned int * __read_mostly xstate_offsets;
-static unsigned int * __read_mostly xstate_sizes;
+unsigned int * __read_mostly xstate_sizes;
 static unsigned int __read_mostly xstate_features;
 static unsigned int __read_mostly xstate_comp_offsets[sizeof(xfeature_mask)*8];
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 865d9fc..1a5d827 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -225,6 +225,7 @@ extern u32 vmx_vmentry_control;
 #define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000
 #define SECONDARY_EXEC_ENABLE_PML               0x00020000
 #define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS   0x00040000
+#define SECONDARY_EXEC_XSAVES                   0x00100000
 extern u32 vmx_secondary_exec_control;
 
 #define VMX_EPT_EXEC_ONLY_SUPPORTED             0x00000001
@@ -290,6 +291,8 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
 #define cpu_has_vmx_pml \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_xsaves \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
 
 #define VMCS_RID_TYPE_MASK              0x80000000
 
@@ -364,6 +367,7 @@ enum vmcs_field {
     VMREAD_BITMAP                   = 0x00002026,
     VMWRITE_BITMAP                  = 0x00002028,
     VIRT_EXCEPTION_INFO             = 0x0000202a,
+    XSS_EXIT_BITMAP                 = 0x0000202c,
     GUEST_PHYSICAL_ADDRESS          = 0x00002400,
     VMCS_LINK_POINTER               = 0x00002800,
     GUEST_IA32_DEBUGCTL             = 0x00002802,
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index f16a306..0491750 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -188,6 +188,8 @@ static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group)
 #define EXIT_REASON_INVPCID             58
 #define EXIT_REASON_VMFUNC              59
 #define EXIT_REASON_PML_FULL            62
+#define EXIT_REASON_XSAVES              63
+#define EXIT_REASON_XRSTORS             64
 
 /*
  * Interruption-information format
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 414cc99..12d939b 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -45,6 +45,7 @@
 #define XSTATE_COMPACTION_ENABLED  (1ULL << 63)
 
 extern u64 xfeature_mask;
+extern unsigned int *xstate_sizes;
 
 /* extended state save area */
 struct __packed __attribute__((aligned (64))) xsave_struct
-- 
1.9.1
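
For reference on the hvm_cpuid() hunk above: CPUID leaf 0xD, sub-leaf 1 reports
in EBX the size of the XSAVE area covering every state component enabled in
XCR0 or IA32_XSS, which the patch computes as XSTATE_AREA_MIN_SIZE plus the
per-component sizes for components 2..62. The standalone sketch below mirrors
that computation only; the xsaves_area_size() helper and the sample
xstate_sizes[] values are illustrative assumptions and are not part of the
patch or of Xen.

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define XSTATE_AREA_MIN_SIZE (512 + 64)  /* legacy FXSAVE area + XSAVE header */

/* Hypothetical per-component sizes; Xen reads the real ones from
 * CPUID leaf 0xD, sub-leaves 2..62. */
static const unsigned int xstate_sizes[63] = {
    [2] = 256,  /* e.g. AVX (YMM high halves) */
};

/* Mirrors the hvm_cpuid() hunk: base area plus the size of every
 * component enabled in either XCR0 or IA32_XSS. */
static unsigned int xsaves_area_size(uint64_t xcr0, uint64_t xss)
{
    unsigned int size = XSTATE_AREA_MIN_SIZE;
    unsigned int i;

    for ( i = 2; i < 63; i++ )
        if ( (xcr0 | xss) & (1ULL << i) )
            size += xstate_sizes[i];

    return size;
}

int main(void)
{
    /* x87 + SSE + AVX enabled in XCR0, nothing enabled in XSS. */
    printf("%u\n", xsaves_area_size(0x7, 0));  /* prints 832 */
    return 0;
}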