Subject: Re: [PATCH v2] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
To: Radim Krčmář, linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Dan Carpenter
References: <20170518173732.12185-2-rkrcmar@redhat.com> <20170519134851.10616-1-rkrcmar@redhat.com>
From: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <8572fc73-7d3c-8e87-4e3f-d2645e24dbef@redhat.com>
Date: Tue, 30 May 2017 16:34:08 +0200
In-Reply-To: <20170519134851.10616-1-rkrcmar@redhat.com>

On 19/05/2017 15:48, Radim Krčmář wrote:
> kvm_skip_emulated_instruction() will return 0 if userspace is
> single-stepping the guest.
>
> kvm_skip_emulated_instruction() uses the return status convention of an
> exit handler: 0 means "exit to userspace" and 1 means "continue vm
> entries".  The problem is that nested_vmx_check_vmptr()'s return status
> means something else: 0 is ok, 1 is error.
>
> This means we would continue executing after a failure.  A static
> checker noticed it because vmptr was not initialized.
>
> Reported-by: Dan Carpenter
> Fixes: 6affcbedcac7 ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
> Signed-off-by: Radim Krčmář
> ---
> Second try -- moves a lot of code around to make it less ugly and keep
> the same behavior as v1, hopefully.
>
>  arch/x86/kvm/vmx.c | 140 ++++++++++++++++++++++-------------------------
>  1 file changed, 57 insertions(+), 83 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 9ff1b04502c9..dd274db9bf77 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> -/*
> - * This function performs the various checks including
> - * - if it's 4KB aligned
> - * - No bits beyond the physical address width are set
> - * - Returns 0 on success or else 1
> - * (Intel SDM Section 30.3)
> - */
> -static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
> -				  gpa_t *vmpointer)
> +static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
>  {
>  	gva_t gva;
> -	gpa_t vmptr;
>  	struct x86_exception e;
> -	struct page *page;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	int maxphyaddr = cpuid_maxphyaddr(vcpu);
>  
>  	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
>  			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
>  		return 1;
>  
> -	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
> -				sizeof(vmptr), &e)) {
> +	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
> +				sizeof(*vmpointer), &e)) {
>  		kvm_inject_page_fault(vcpu, &e);
>  		return 1;
>  	}
>  
> -	switch (exit_reason) {
> -	case EXIT_REASON_VMON:
> -		/*
> -		 * SDM 3: 24.11.5
> -		 * The first 4 bytes of VMXON region contain the supported
> -		 * VMCS revision identifier
> -		 *
> -		 * Note - IA32_VMX_BASIC[48] will never be 1
> -		 * for the nested case;
> -		 * which replaces physical address width with 32
> -		 *
> -		 */
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		page = nested_get_page(vcpu, vmptr);
> -		if (page == NULL) {
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
> -			kunmap(page);
> -			nested_release_page_clean(page);
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		kunmap(page);
> -		nested_release_page_clean(page);
> -		vmx->nested.vmxon_ptr = vmptr;
> -		break;
> -	case EXIT_REASON_VMCLEAR:
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMCLEAR_INVALID_ADDRESS);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		if (vmptr == vmx->nested.vmxon_ptr) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMCLEAR_VMXON_POINTER);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		break;
> -	case EXIT_REASON_VMPTRLD:
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMPTRLD_INVALID_ADDRESS);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		if (vmptr == vmx->nested.vmxon_ptr) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMPTRLD_VMXON_POINTER);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		break;
> -	default:
> -		return 1; /* shouldn't happen */
> -	}
> -
> -	if (vmpointer)
> -		*vmpointer = vmptr;
>  	return 0;
>  }
>  
> @@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
>  static int handle_vmon(struct kvm_vcpu *vcpu)
>  {
>  	int ret;
> +	gpa_t vmptr;
> +	struct page *page;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
> @@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  		return 1;
>  	}
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
> -
> +
> +	/*
> +	 * SDM 3: 24.11.5
> +	 * The first 4 bytes of VMXON region contain the supported
> +	 * VMCS revision identifier
> +	 *
> +	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
> +	 * which replaces physical address width with 32
> +	 */
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	page = nested_get_page(vcpu, vmptr);
> +	if (page == NULL) {
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
> +		kunmap(page);
> +		nested_release_page_clean(page);
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +	kunmap(page);
> +	nested_release_page_clean(page);
> +
> +	vmx->nested.vmxon_ptr = vmptr;
>  	ret = enter_vmx_operation(vcpu);
>  	if (ret)
>  		return ret;
> @@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
>  
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	if (vmptr == vmx->nested.vmxon_ptr) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
>  	if (vmptr == vmx->nested.current_vmptr)
>  		nested_release_vmcs12(vmx);
>  
> @@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
>  
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	if (vmptr == vmx->nested.vmxon_ptr) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
>  	if (vmx->nested.current_vmptr != vmptr) {
>  		struct vmcs12 *new_vmcs12;
>  		struct page *page;
> 

Nice, diffstat speaks for itself. Queuing it.

Thanks,

Paolo
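
For readers skimming the archive, a minimal standalone sketch of the
convention clash described in the commit message may help. This is not
kernel code: skip_emulated_instruction() and check_vmptr_buggy() are
hypothetical stand-ins for kvm_skip_emulated_instruction() and the old
nested_vmx_check_vmptr(), modeling only their return conventions.

#include <stdio.h>

/* Exit-handler convention: 0 = exit to userspace, 1 = continue vm entries. */
static int skip_emulated_instruction(int single_stepping)
{
	return single_stepping ? 0 : 1;
}

/* Checker convention: 0 = ok, 1 = error. */
static int check_vmptr_buggy(int vmptr_invalid, int single_stepping)
{
	if (vmptr_invalid)
		/*
		 * BUG: forwards an exit-handler return value to a caller
		 * that treats 0 as success.  Under single-stepping this
		 * returns 0, so the failed check looks like it passed.
		 */
		return skip_emulated_instruction(single_stepping);
	return 0;
}

int main(void)
{
	if (check_vmptr_buggy(1 /* invalid vmptr */, 1 /* single-stepping */) == 0)
		printf("caller keeps going after a failed check\n");
	return 0;
}

The patch resolves the clash by reducing nested_vmx_get_vmptr() to the
guest-memory read (0 = ok, 1 = error) and moving the validity checks,
together with their kvm_skip_emulated_instruction() calls, into the exit
handlers themselves, where that return value has its usual meaning.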