From mboxrd@z Thu Jan  1 00:00:00 1970
From: Ed White
Subject: [PATCH v5 07/15] VMX: add VMFUNC leaf 0 (EPTP switching) to emulator.
Date: Mon, 13 Jul 2015 17:14:55 -0700
Message-ID: <1436832903-12639-8-git-send-email-edmund.h.white@intel.com>
References: <1436832903-12639-1-git-send-email-edmund.h.white@intel.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <1436832903-12639-1-git-send-email-edmund.h.white@intel.com>
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xen.org
Cc: Ravi Sahita, Wei Liu, George Dunlap, Ian Jackson, Tim Deegan,
 Jan Beulich, Andrew Cooper, tlengyel@novetta.com, Daniel De Graaf
List-Id: xen-devel@lists.xenproject.org

From: Ravi Sahita

Add a vmfunc hook to the x86 instruction emulator and implement it for
VMX: when altp2m is active and the hardware lacks VMFUNC support, a
guest-executed VMFUNC leaf 0 (EAX = 0) is emulated by switching the
vCPU to the altp2m view selected by ECX; any other use injects #UD.
Also add a VMFUNC vmexit intercept, which for now simply reflects a
failed VMFUNC back to the guest as #UD.

Signed-off-by: Ravi Sahita
---
 xen/arch/x86/hvm/emulate.c             | 19 +++++++++++++++--
 xen/arch/x86/hvm/vmx/vmx.c             | 38 ++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_emulate/x86_emulate.c | 19 +++++++++++------
 xen/arch/x86/x86_emulate/x86_emulate.h |  4 ++++
 xen/include/asm-x86/hvm/hvm.h          |  2 ++
 5 files changed, 74 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index fe5661d..1aa8af4 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1436,6 +1436,19 @@ static int hvmemul_invlpg(
     return rc;
 }
 
+static int hvmemul_vmfunc(
+    struct x86_emulate_ctxt *ctxt)
+{
+    int rc;
+
+    rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
+    if ( rc != X86EMUL_OKAY )
+    {
+        hvmemul_inject_hw_exception(TRAP_invalid_op, 0, ctxt);
+    }
+    return rc;
+}
+
 static const struct x86_emulate_ops hvm_emulate_ops = {
     .read          = hvmemul_read,
     .insn_fetch    = hvmemul_insn_fetch,
@@ -1459,7 +1472,8 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
     .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
-    .invlpg        = hvmemul_invlpg
+    .invlpg        = hvmemul_invlpg,
+    .vmfunc        = hvmemul_vmfunc,
 };
 
 static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
@@ -1485,7 +1499,8 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
     .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
-    .invlpg        = hvmemul_invlpg
+    .invlpg        = hvmemul_invlpg,
+    .vmfunc        = hvmemul_vmfunc,
 };
 
 static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 38dba6b..845cdbc 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -82,6 +82,7 @@ static void vmx_fpu_dirty_intercept(void);
 static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
 static void vmx_invlpg_intercept(unsigned long vaddr);
+static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
 
 uint8_t __read_mostly posted_intr_vector;
 
@@ -1830,6 +1831,19 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
     vmx_vmcs_exit(v);
 }
 
+static int vmx_vcpu_emulate_vmfunc(struct cpu_user_regs *regs)
+{
+    int rc = X86EMUL_EXCEPTION;
+    struct vcpu *curr = current;
+
+    if ( !cpu_has_vmx_vmfunc && altp2m_active(curr->domain) &&
+         regs->eax == 0 &&
+         p2m_switch_vcpu_altp2m_by_id(curr, (uint16_t)regs->ecx) )
+        rc = X86EMUL_OKAY;
+
+    return rc;
+}
+
 static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
 {
     bool_t rc = 0;
@@ -1898,6 +1912,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .msr_read_intercept   = vmx_msr_read_intercept,
     .msr_write_intercept  = vmx_msr_write_intercept,
     .invlpg_intercept     = vmx_invlpg_intercept,
+    .vmfunc_intercept     = vmx_vmfunc_intercept,
     .handle_cd            = vmx_handle_cd,
     .set_info_guest       = vmx_set_info_guest,
     .set_rdtsc_exiting    = vmx_set_rdtsc_exiting,
@@ -1924,6 +1939,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .altp2m_vcpu_update_eptp = vmx_vcpu_update_eptp,
     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
     .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
+    .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
 };
 
 const struct hvm_function_table * __init start_vmx(void)
@@ -2095,6 +2111,19 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
     vpid_sync_vcpu_gva(curr, vaddr);
 }
 
+static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
+{
+    /*
+     * This handler is a placeholder for a future where Xen may
+     * want to handle VMFUNC exits and resume a domain normally without
+     * injecting a #UD to the guest - for example, in a VT-nested
+     * scenario where Xen may want to lazily shadow the alternate
+     * EPTP list.
+     */
+    gdprintk(XENLOG_ERR, "Failed guest VMFUNC execution\n");
+    return X86EMUL_EXCEPTION;
+}
+
 static int vmx_cr_access(unsigned long exit_qualification)
 {
     struct vcpu *curr = current;
@@ -3234,6 +3263,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         update_guest_eip();
         break;
 
+    case EXIT_REASON_VMFUNC:
+        if ( (vmx_vmfunc_intercept(regs) == X86EMUL_EXCEPTION) ||
+             (vmx_vmfunc_intercept(regs) == X86EMUL_UNHANDLEABLE) ||
+             (vmx_vmfunc_intercept(regs) == X86EMUL_RETRY) )
+            hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        else
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_GETSEC:
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index c017c69..e596131 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3786,6 +3786,7 @@ x86_emulate(
         break;
     }
 
+ no_writeback:
     /* Inject #DB if single-step tracing was enabled at instruction start. */
     if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
          (ops->inject_hw_exception != NULL) )
@@ -3816,19 +3817,17 @@ x86_emulate(
         struct segment_register reg;
         unsigned long base, limit, cr0, cr0w;
 
-        if ( modrm == 0xdf ) /* invlpga */
+        switch( modrm )
         {
+        case 0xdf: /* invlpga */
             generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
             fail_if(ops->invlpg == NULL);
             if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.eax),
                                    ctxt)) )
                 goto done;
-            break;
-        }
-
-        if ( modrm == 0xf9 ) /* rdtscp */
-        {
+            goto no_writeback;
+        case 0xf9: /* rdtscp */ {
             uint64_t tsc_aux;
             fail_if(ops->read_msr == NULL);
             if ( (rc = ops->read_msr(MSR_TSC_AUX, &tsc_aux, ctxt)) != 0 )
@@ -3836,6 +3835,14 @@ x86_emulate(
             _regs.ecx = (uint32_t)tsc_aux;
             goto rdtsc;
         }
+        case 0xd4: /* vmfunc */
+            generate_exception_if(lock_prefix | rep_prefix() | (vex.pfx == vex_66),
+                                  EXC_UD, -1);
+            fail_if(ops->vmfunc == NULL);
+            if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
+                goto done;
+            goto no_writeback;
+        }
 
         switch ( modrm_reg & 7 )
         {
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 064b8f4..a4d4ec8 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -397,6 +397,10 @@ struct x86_emulate_ops
         enum x86_segment seg,
         unsigned long offset,
         struct x86_emulate_ctxt *ctxt);
+
+    /* vmfunc: Emulate VMFUNC via the given EAX/ECX inputs. */
+    int (*vmfunc)(
+        struct x86_emulate_ctxt *ctxt);
 };
 
 struct cpu_user_regs;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 473ac30..335b7a6 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -167,6 +167,7 @@ struct hvm_function_table {
     int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
     int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
     void (*invlpg_intercept)(unsigned long vaddr);
+    int (*vmfunc_intercept)(struct cpu_user_regs *regs);
     void (*handle_cd)(struct vcpu *v, unsigned long value);
     void (*set_info_guest)(struct vcpu *v);
     void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
@@ -215,6 +216,7 @@ struct hvm_function_table {
     void (*altp2m_vcpu_update_eptp)(struct vcpu *v);
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
+    int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
 };
 
 extern struct hvm_function_table hvm_funcs;
-- 
1.9.1
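
Not part of the patch - a minimal guest-side sketch of the VMFUNC leaf 0
calling convention the code above emulates (EAX = 0 selects EPTP switching,
ECX = the altp2m view index, opcode 0f 01 d4, i.e. the 0xd4 case added to the
Grp7 decode above). The helper name is hypothetical and only illustrates how
the interface is driven:

/* Hypothetical guest helper: switch to altp2m view 'idx' via VMFUNC leaf 0. */
static inline void altp2m_switch_view(unsigned int idx)
{
    /* VMFUNC (0f 01 d4): function number in EAX, EPTP-list index in ECX. */
    asm volatile ( ".byte 0x0f, 0x01, 0xd4"
                   :
                   : "a" (0), "c" (idx)
                   : "memory" );
}

On hardware without VMFUNC support, vmx_vcpu_emulate_vmfunc() above performs
the same view switch when Xen emulates the instruction through the new
ops->vmfunc hook.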