From mboxrd@z Thu Jan 1 00:00:00 1970 From: Qing He Subject: [PATCH 13/16] vmx: nest: L2 tsc Date: Wed, 8 Sep 2010 23:22:21 +0800 Message-ID: <1283959344-3837-14-git-send-email-qing.he@intel.com> References: <1283959344-3837-1-git-send-email-qing.he@intel.com> Return-path: In-Reply-To: <1283959344-3837-1-git-send-email-qing.he@intel.com> List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Sender: xen-devel-bounces@lists.xensource.com Errors-To: xen-devel-bounces@lists.xensource.com To: xen-devel@lists.xensource.com Cc: Qing He List-Id: xen-devel@lists.xenproject.org L2 TSC needs special handling, either rdtsc exiting is turned on or off Signed-off-by: Qing He Signed-off-by: Eddie Dong --- diff -r 0f6400481299 xen/arch/x86/hvm/vmx/nest.c --- a/xen/arch/x86/hvm/vmx/nest.c Wed Sep 08 18:43:13 2010 +0800 +++ b/xen/arch/x86/hvm/vmx/nest.c Wed Sep 08 18:52:00 2010 +0800 @@ -647,6 +647,18 @@ * Nested VMX context switch */ +u64 vmx_nest_get_tsc_offset(struct vcpu *v) +{ + u64 offset = 0; + struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest; + + if ( __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL) & + CPU_BASED_USE_TSC_OFFSETING ) + offset = __get_vvmcs(nest->vvmcs, TSC_OFFSET); + + return offset; +} + static unsigned long vmcs_gstate_field[] = { /* 16 BITS */ GUEST_ES_SELECTOR, @@ -818,6 +830,7 @@ static void load_vvmcs_guest_state(struct vmx_nest_struct *nest) { + struct vcpu *v = current; int i; /* vvmcs.gstate to svmcs.gstate */ @@ -828,6 +841,8 @@ hvm_set_cr4(__get_vvmcs(nest->vvmcs, GUEST_CR4)); hvm_set_cr3(__get_vvmcs(nest->vvmcs, GUEST_CR3)); + hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset); + vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_INTR_INFO); vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE); vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_INSTRUCTION_LEN); @@ -936,6 +951,7 @@ static void load_vvmcs_host_state(struct vmx_nest_struct *nest) { + struct vcpu *v = current; int i; u64 r; @@ -949,6 +965,8 @@ 
 hvm_set_cr4(__get_vvmcs(nest->vvmcs, HOST_CR4)); hvm_set_cr3(__get_vvmcs(nest->vvmcs, HOST_CR3)); + hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset); + __set_vvmcs(nest->vvmcs, VM_ENTRY_INTR_INFO, 0); } @@ -1205,6 +1223,21 @@ if ( ctrl & CPU_BASED_RDTSC_EXITING ) nest->vmexit_pending = 1; + else + { + uint64_t tsc; + + /* + * special handling is needed if L1 doesn't intercept rdtsc, + * to avoid changing guest_tsc and messing up timekeeping in L1 + */ + tsc = hvm_get_guest_tsc(v); + tsc += __get_vvmcs(nest->vvmcs, TSC_OFFSET); + regs->eax = (uint32_t)tsc; + regs->edx = (uint32_t)(tsc >> 32); + + bypass_l0 = 1; + } break; } diff -r 0f6400481299 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 08 18:43:13 2010 +0800 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 08 18:52:00 2010 +0800 @@ -969,6 +969,10 @@ static void vmx_set_tsc_offset(struct vcpu *v, u64 offset) { vmx_vmcs_enter(v); + + if ( v->arch.hvm_vcpu.in_nesting ) + offset += vmx_nest_get_tsc_offset(v); + __vmwrite(TSC_OFFSET, offset); #if defined (__i386__) __vmwrite(TSC_OFFSET_HIGH, offset >> 32); diff -r 0f6400481299 xen/include/asm-x86/hvm/vmx/nest.h --- a/xen/include/asm-x86/hvm/vmx/nest.h Wed Sep 08 18:43:13 2010 +0800 +++ b/xen/include/asm-x86/hvm/vmx/nest.h Wed Sep 08 18:52:00 2010 +0800 @@ -69,6 +69,8 @@ unsigned long value); void vmx_nest_update_exception_bitmap(struct vcpu *v, unsigned long value); +u64 vmx_nest_get_tsc_offset(struct vcpu *v); + void vmx_nest_idtv_handling(void); int vmx_nest_l2_vmexit_handler(struct cpu_user_regs *regs,