From: Avi Kivity
Subject: Re: [PATCH 16/24] Implement VMLAUNCH and VMRESUME
Date: Mon, 14 Jun 2010 14:41:29 +0300
Message-ID: <4C161569.3000602@redhat.com>
References: <1276431753-nyh@il.ibm.com> <201006131230.o5DCUk2i013070@rice.haifa.ibm.com>
In-Reply-To: <201006131230.o5DCUk2i013070@rice.haifa.ibm.com>
To: "Nadav Har'El"
Cc: kvm@vger.kernel.org

On 06/13/2010 03:30 PM, Nadav Har'El wrote:
> Implement the VMLAUNCH and VMRESUME instructions, allowing a guest
> hypervisor to run its own guests.
>
> Signed-off-by: Nadav Har'El
> ---
> --- .before/arch/x86/kvm/vmx.c 2010-06-13 15:01:29.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c 2010-06-13 15:01:29.000000000 +0300
> @@ -272,6 +272,9 @@ struct __attribute__ ((__packed__)) vmcs
>          struct shadow_vmcs shadow_vmcs;
>
>          bool launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
> +
> +        int cpu;

Not sure cpu should be here.  It certainly won't survive live
migration.  Perhaps it belongs in struct vmcs_list (which should be
renamed, perhaps to struct cached_vmcs).

> +        int launched;
>  };

What's the difference between this and launch_state?

>
>  struct vmcs_list {
> @@ -297,6 +300,24 @@ struct nested_vmx {
>          /* list of real (hardware) VMCS, one for each L2 guest of L1 */
>          struct list_head l2_vmcs_list; /* a vmcs_list */
>          int l2_vmcs_num;
> +
> +        /* Are we running a nested guest now */
> +        bool nested_mode;
> +        /* Level 1 state for switching to level 2 and back */
> +        struct {
> +                u64 efer;
> +                unsigned long cr3;
> +                unsigned long cr4;
> +                u64 io_bitmap_a;
> +                u64 io_bitmap_b;
> +                u64 msr_bitmap;
> +                int cpu;
> +                int launched;
> +        } l1_state;

This state needs save/restore support (as well as the current vmptr
and vmxon state).

> +        /* Level 1 shadow vmcs for switching to level 2 and back */
> +        struct shadow_vmcs *l1_shadow_vmcs;

Again, not really happy about shadowing the non-nested vmcs.

> +        /* Level 1 vmcs loaded into the processor */
> +        struct vmcs *l1_vmcs;
>  };
>
>  enum vmcs_field_type {
> @@ -1407,6 +1428,19 @@ static void vmx_vcpu_load(struct kvm_vcp
>                  new_offset = vmcs_read64(TSC_OFFSET) + delta;
>                  vmcs_write64(TSC_OFFSET, new_offset);
>          }
> +
> +        if (vmx->nested.l1_shadow_vmcs != NULL) {
> +                struct shadow_vmcs *l1svmcs =
> +                        vmx->nested.l1_shadow_vmcs;
> +                l1svmcs->host_tr_base = vmcs_readl(HOST_TR_BASE);
> +                l1svmcs->host_gdtr_base = vmcs_readl(HOST_GDTR_BASE);
> +                l1svmcs->host_ia32_sysenter_esp =
> +                        vmcs_readl(HOST_IA32_SYSENTER_ESP);

These are all static (at least on a single cpu).  No need to read them
from a vmcs.

> +                if (tsc_this < vcpu->arch.host_tsc)
> +                        l1svmcs->tsc_offset = vmcs_read64(TSC_OFFSET);
> +                if (vmx->nested.nested_mode)
> +                        load_vmcs_host_state(l1svmcs);
> +        }
>  }
>
>
> @@ -4348,6 +4392,42 @@ static int handle_vmclear(struct kvm_vcp
>          return 1;
>  }
>
> +static int nested_vmx_run(struct kvm_vcpu *vcpu);
> +
> +static int handle_launch_or_resume(struct kvm_vcpu *vcpu, bool launch)
> +{
> +        if (!nested_vmx_check_permission(vcpu))
> +                return 1;
> +
> +        if (!nested_map_current(vcpu))
> +                return 1;

Better error handling needed, perhaps triple fault.
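Something along these lines, perhaps (just a sketch, untested; assumes
the usual vcpu->requests mechanism):

        if (!nested_map_current(vcpu)) {
                /* unusable vmcs - kill the guest rather than silently
                 * ignoring the instruction */
                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                return 1;
        }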
> +        if (to_vmx(vcpu)->nested.current_l2_page->launch_state == launch) {
> +                /* Must use VMLAUNCH for the first time, VMRESUME later */
> +                set_rflags_to_vmx_fail_valid(vcpu);
> +                nested_unmap_current(vcpu);

skip_emulated_instruction() is needed on this error path too.

> +                return 1;
> +        }
> +        nested_unmap_current(vcpu);
> +
> +        skip_emulated_instruction(vcpu);
> +
> +        nested_vmx_run(vcpu);
> +        return 1;
> +}
>
> @@ -4958,7 +5038,8 @@ static int vmx_handle_exit(struct kvm_vc
>                         "(0x%x) and exit reason is 0x%x\n",
>                         __func__, vectoring_info, exit_reason);
>
> -        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
> +        if (!vmx->nested.nested_mode &&
> +                unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {

Too much indent.  The unlikely() looks like the first statement of the
block.

I think it isn't enough to check for nested mode.  If the guest hasn't
enabled virtual NMIs, then the nested guest should behave exactly like
the guest.

>
> +static int nested_vmx_run(struct kvm_vcpu *vcpu)
> +{
> +        struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +        vmx->nested.nested_mode = 1;

s/1/true/ - nested_mode is a bool.

> +        sync_cached_regs_to_vmcs(vcpu);
> +        save_vmcs(vmx->nested.l1_shadow_vmcs);
> +
> +        vmx->nested.l1_state.efer = vcpu->arch.efer;

Not sure why you need to save efer.  Ordinarily, vmx reconstructs it
from the guest efer and the host size exit control; you can do the
same.

> +        if (!enable_ept)
> +                vmx->nested.l1_state.cr3 = vcpu->arch.cr3;

Ditto, isn't that HOST_CR3?

> +        vmx->nested.l1_state.cr4 = vcpu->arch.cr4;

Ditto.

> +
> +        if (!nested_map_current(vcpu)) {
> +                set_rflags_to_vmx_fail_valid(vcpu);
> +                return 1;
> +        }
> +
> +        if (cpu_has_vmx_msr_bitmap())
> +                vmx->nested.l1_state.msr_bitmap = vmcs_read64(MSR_BITMAP);
> +        else
> +                vmx->nested.l1_state.msr_bitmap = 0;
> +
> +        vmx->nested.l1_state.io_bitmap_a = vmcs_read64(IO_BITMAP_A);
> +        vmx->nested.l1_state.io_bitmap_b = vmcs_read64(IO_BITMAP_B);
> +        vmx->nested.l1_vmcs = vmx->vmcs;
> +        vmx->nested.l1_state.cpu = vcpu->cpu;
> +        vmx->nested.l1_state.launched = vmx->launched;
> +
> +        vmx->vmcs = nested_get_current_vmcs(vcpu);
> +        if (!vmx->vmcs) {
> +                printk(KERN_ERR "Missing VMCS\n");
> +                set_rflags_to_vmx_fail_valid(vcpu);
> +                return 1;
> +        }
> +
> +        vcpu->cpu = vmx->nested.current_l2_page->cpu;

How can this change?  It must remain constant between
kvm_arch_vcpu_load() and kvm_arch_vcpu_put().

> +        vmx->launched = vmx->nested.current_l2_page->launched;
> +
> +        if (!vmx->nested.current_l2_page->launch_state || !vmx->launched) {
> +                vmcs_clear(vmx->vmcs);
> +                vmx->launched = 0;
> +                vmx->nested.current_l2_page->launch_state = 1;
> +        }
> +
> +        vmx_vcpu_load(vcpu, get_cpu());
> +        put_cpu();
> +
> +        prepare_vmcs_02(vcpu,
> +                get_shadow_vmcs(vcpu), vmx->nested.l1_shadow_vmcs);
> +
> +        if (get_shadow_vmcs(vcpu)->vm_entry_controls &
> +            VM_ENTRY_IA32E_MODE) {
> +                if (!((vcpu->arch.efer & EFER_LMA) &&
> +                      (vcpu->arch.efer & EFER_LME)))
> +                        vcpu->arch.efer |= (EFER_LMA | EFER_LME);
> +        } else {
> +                if ((vcpu->arch.efer & EFER_LMA) ||
> +                    (vcpu->arch.efer & EFER_LME))
> +                        vcpu->arch.efer = 0;
> +        }
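On the efer point above: the switch back to L1 can do the mirror image
of this block instead of restoring a saved value.  Roughly (a sketch,
untested; the vm_exit_controls field name is assumed from the
shadow_vmcs layout):

        /* recover L1's EFER.LMA/LME from its host address-space size
         * exit control instead of saving efer across the L2 entry */
        vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
        if (get_shadow_vmcs(vcpu)->vm_exit_controls &
            VM_EXIT_HOST_ADDR_SPACE_SIZE)
                vcpu->arch.efer |= (EFER_LMA | EFER_LME);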
> +        /* vmx_set_cr0() sets the cr0 that L2 will read, to be the one that L1
> +         * dictated, and takes appropriate actions for special cr0 bits (like
> +         * real mode, etc.).
> +         */
> +        vmx_set_cr0(vcpu,
> +                (get_shadow_vmcs(vcpu)->guest_cr0 &
> +                 ~get_shadow_vmcs(vcpu)->cr0_guest_host_mask) |
> +                (get_shadow_vmcs(vcpu)->cr0_read_shadow &
> +                 get_shadow_vmcs(vcpu)->cr0_guest_host_mask));
> +
> +        /* However, vmx_set_cr0 incorrectly enforces KVM's relationship between
> +         * GUEST_CR0 and CR0_READ_SHADOW, e.g., that the former is the same as
> +         * the latter with TS added if !fpu_active. We need to take the
> +         * actual GUEST_CR0 that L1 wanted, just with added TS if !fpu_active
> +         * like KVM wants (for the "lazy fpu" feature, to avoid the costly
> +         * restoration of fpu registers until the FPU is really used).
> +         */
> +        vmcs_writel(GUEST_CR0, get_shadow_vmcs(vcpu)->guest_cr0 |
> +                (vcpu->fpu_active ? 0 : X86_CR0_TS));

Please update vmx_set_cr0() instead.

> +
> +        vmx_set_cr4(vcpu, get_shadow_vmcs(vcpu)->guest_cr4);

Note: kvm_set_cr4() does some stuff that vmx_set_cr4() doesn't,
especially the kvm_mmu_reset_context().

> +        vmcs_writel(CR4_READ_SHADOW,
> +                get_shadow_vmcs(vcpu)->cr4_read_shadow);
> +
> +        /* we have to set the X86_CR0_PG bit of the cached cr0, because
> +         * kvm_mmu_reset_context enables paging only if X86_CR0_PG is set in
> +         * CR0 (we need the paging so that KVM treats this guest as a paging
> +         * guest, so we can easily forward page faults to L1.)
> +         */
> +        vcpu->arch.cr0 |= X86_CR0_PG;

Since this version doesn't support unrestricted nested guests, cr0.pg
will already be set or we will have failed vmentry.

> +
> +        if (enable_ept && !nested_cpu_has_vmx_ept(vcpu)) {

We don't support nested ept yet, yes?

> +                vmcs_write32(GUEST_CR3, get_shadow_vmcs(vcpu)->guest_cr3);
> +                vmx->vcpu.arch.cr3 = get_shadow_vmcs(vcpu)->guest_cr3;

Should be via kvm_set_cr3().

> +        } else {
> +                int r;
> +                kvm_set_cr3(vcpu, get_shadow_vmcs(vcpu)->guest_cr3);
> +                kvm_mmu_reset_context(vcpu);
> +
> +                nested_unmap_current(vcpu);
> +
> +                r = kvm_mmu_load(vcpu);

Ordinary guest entry will load the mmu.  Failures here can only be
memory allocation and should not be visible to the guest anyway (we
return -ENOMEM to userspace and that's it).

> +                if (unlikely(r)) {
> +                        printk(KERN_ERR "Error in kvm_mmu_load r %d\n", r);
> +                        set_rflags_to_vmx_fail_valid(vcpu);
> +                        /* switch back to L1 */
> +                        vmx->nested.nested_mode = 0;
> +                        vmx->vmcs = vmx->nested.l1_vmcs;
> +                        vcpu->cpu = vmx->nested.l1_state.cpu;
> +                        vmx->launched = vmx->nested.l1_state.launched;
> +
> +                        vmx_vcpu_load(vcpu, get_cpu());
> +                        put_cpu();
> +
> +                        return 1;
> +                }
> +
> +                nested_map_current(vcpu);
> +        }
> +
> +        kvm_register_write(vcpu, VCPU_REGS_RSP,
> +                get_shadow_vmcs(vcpu)->guest_rsp);
> +        kvm_register_write(vcpu, VCPU_REGS_RIP,
> +                get_shadow_vmcs(vcpu)->guest_rip);
> +
> +        nested_unmap_current(vcpu);
> +
> +        return 1;
> +}
> +
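That whole rollback path can then collapse to something like this (a
sketch, untested; assumes handle_launch_or_resume() passes
nested_vmx_run()'s return value through, so a negative value reaches
userspace):

        r = kvm_mmu_load(vcpu);
        if (unlikely(r))
                return r;       /* -ENOMEM, surfaced to userspace */

-- 
error compiling committee.c: too many arguments to function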