* [PATCH] KVM/nVMX: Move nested_vmx_check_vmentry_hw inline assembly to vmenter.S
From: Uros Bizjak @ 2020-10-07 14:43 UTC
  To: kvm; +Cc: Uros Bizjak, Paolo Bonzini, Sean Christopherson

Move the big inline assembly block from nested_vmx_check_vmentry_hw
to the vmenter.S assembly file, taking into account all ABI requirements.

The new function is modelled after __vmx_vcpu_run, and also calls
vmx_update_host_rsp instead of open-coding the function in assembly.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
 arch/x86/kvm/vmx/nested.c  | 32 +++-----------------------------
 arch/x86/kvm/vmx/vmenter.S | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1bb6b31eb646..7b26e983e31c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3012,6 +3012,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+bool __nested_vmx_check_vmentry_hw(struct vcpu_vmx *vmx, bool launched);
+
 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3050,35 +3052,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 		vmx->loaded_vmcs->host_state.cr4 = cr4;
 	}
 
-	asm(
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"je 1f \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
-		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
-		/* Check if vmlaunch or vmresume is needed */
-		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
-
-		/*
-		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
-		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
-		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
-		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
-		 */
-		"call vmx_vmenter\n\t"
-
-		CC_SET(be)
-	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
-	      :	[HOST_RSP]"r"((unsigned long)HOST_RSP),
-		[loaded_vmcs]"r"(vmx->loaded_vmcs),
-		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
-		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
-		[wordsize]"i"(sizeof(ulong))
-	      : "memory"
-	);
+	vm_fail = __nested_vmx_check_vmentry_hw(vmx, vmx->loaded_vmcs->launched);
 
 	if (vmx->msr_autoload.host.nr)
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 799db084a336..9fdcbd9320dc 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -234,6 +234,42 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	jmp 1b
 SYM_FUNC_END(__vmx_vcpu_run)
 
+/**
+ * __nested_vmx_check_vmentry_hw - Run a vCPU via a transition to
+ *				   a nested VMX guest mode
+ * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
+ * @launched:	%true if the VMCS has been launched
+ *
+ * Returns:
+ *	0 on VM-Exit, 1 on VM-Fail
+ */
+SYM_FUNC_START(__nested_vmx_check_vmentry_hw)
+	push %_ASM_BP
+	mov  %_ASM_SP, %_ASM_BP
+
+	push %_ASM_BX
+
+	/* Copy @launched to BL, _ASM_ARG2 is volatile. */
+	mov %_ASM_ARG2B, %bl
+
+	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
+	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+	call vmx_update_host_rsp
+
+	/* Check if vmlaunch or vmresume is needed */
+	cmpb $0, %bl
+
+	/* Enter guest mode */
+	call vmx_vmenter
+
+	/* Return 0 on VM-Exit, 1 on VM-Fail */
+	setbe %al
+
+	pop %_ASM_BX
+
+	pop %_ASM_BP
+	ret
+SYM_FUNC_END(__nested_vmx_check_vmentry_hw)
 
 .section .text, "ax"
 
-- 
2.26.2



* Re: [PATCH] KVM/nVMX: Move nested_vmx_check_vmentry_hw inline assembly to vmenter.S
From: Sean Christopherson @ 2020-10-13 15:19 UTC
  To: Uros Bizjak; +Cc: kvm, Paolo Bonzini

On Wed, Oct 07, 2020 at 04:43:12PM +0200, Uros Bizjak wrote:
> Move the big inline assembly block from nested_vmx_check_vmentry_hw
> to the vmenter.S assembly file, taking into account all ABI requirements.
> 
> The new function is modelled after __vmx_vcpu_run, and also calls
> vmx_update_host_rsp instead of open-coding the function in assembly.

Is there a specific motivation for this change?  The inline asm is ugly, but
it's contained.

If we really want to get rid of the inline asm, I'd probably vote to simply
use __vmx_vcpu_run() instead of adding another assembly helper.  The (double)
GPR save/restore is wasteful, but this flow is basically anti-performance
anyways.  Outside of KVM developers, I doubt anyone actually enables this path.
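
Roughly, something like this (untested, and going from memory on
__vmx_vcpu_run()'s current vmx/regs/launched signature):

	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				 vmx->loaded_vmcs->launched);

The extra save/restore through vcpu->arch.regs is the wasted work
mentioned above.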

> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> ---
>  arch/x86/kvm/vmx/nested.c  | 32 +++-----------------------------
>  arch/x86/kvm/vmx/vmenter.S | 36 ++++++++++++++++++++++++++++++++++++
>  2 files changed, 39 insertions(+), 29 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 1bb6b31eb646..7b26e983e31c 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3012,6 +3012,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> +bool __nested_vmx_check_vmentry_hw(struct vcpu_vmx *vmx, bool launched);
> +
>  static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -3050,35 +3052,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
>  		vmx->loaded_vmcs->host_state.cr4 = cr4;
>  	}
>  
> -	asm(
> -		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
> -		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
> -		"je 1f \n\t"
> -		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
> -		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
> -		"1: \n\t"
> -		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
> -
> -		/* Check if vmlaunch or vmresume is needed */
> -		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
> -
> -		/*
> -		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
> -		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
> -		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
> -		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
> -		 */
> -		"call vmx_vmenter\n\t"
> -
> -		CC_SET(be)
> -	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
> -	      :	[HOST_RSP]"r"((unsigned long)HOST_RSP),
> -		[loaded_vmcs]"r"(vmx->loaded_vmcs),
> -		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
> -		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
> -		[wordsize]"i"(sizeof(ulong))
> -	      : "memory"
> -	);
> +	vm_fail = __nested_vmx_check_vmentry_hw(vmx, vmx->loaded_vmcs->launched);
>  
>  	if (vmx->msr_autoload.host.nr)
>  		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 799db084a336..9fdcbd9320dc 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -234,6 +234,42 @@ SYM_FUNC_START(__vmx_vcpu_run)
>  	jmp 1b
>  SYM_FUNC_END(__vmx_vcpu_run)
>  
> +/**
> + * __nested_vmx_check_vmentry_hw - Run a vCPU via a transition to
> + *				   a nested VMX guest mode

This function comment is incorrect: this helper doesn't run the vCPU, it
simply executes VMLAUNCH or VMRESUME, which is expected to fail (and we're
in BUG_ON territory if it doesn't).
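
Maybe something along these lines instead (wording is only a suggestion):

 * __nested_vmx_check_vmentry_hw - Execute VMLAUNCH/VMRESUME purely to let
 *				   hardware run its VM-Enter consistency
 *				   checks, expecting immediate VM-Exit or
 *				   VM-Fail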

> + * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
> + * @launched:	%true if the VMCS has been launched
> + *
> + * Returns:
> + *	0 on VM-Exit, 1 on VM-Fail
> + */
> +SYM_FUNC_START(__nested_vmx_check_vmentry_hw)
> +	push %_ASM_BP
> +	mov  %_ASM_SP, %_ASM_BP
> +
> +	push %_ASM_BX
> +
> +	/* Copy @launched to BL, _ASM_ARG2 is volatile. */
> +	mov %_ASM_ARG2B, %bl
> +
> +	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
> +	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
> +	call vmx_update_host_rsp
> +
> +	/* Check if vmlaunch or vmresume is needed */
> +	cmpb $0, %bl
> +
> +	/* Enter guest mode */
> +	call vmx_vmenter
> +
> +	/* Return 0 on VM-Exit, 1 on VM-Fail */
> +	setbe %al
> +
> +	pop %_ASM_BX
> +
> +	pop %_ASM_BP
> +	ret
> +SYM_FUNC_END(__nested_vmx_check_vmentry_hw)
>  
>  .section .text, "ax"
>  
> -- 
> 2.26.2
> 


* Re: [PATCH] KVM/nVMX: Move nested_vmx_check_vmentry_hw inline assembly to vmenter.S
From: Uros Bizjak @ 2020-10-14 13:33 UTC
  To: Sean Christopherson; +Cc: kvm, Paolo Bonzini

On Tue, Oct 13, 2020 at 5:19 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Wed, Oct 07, 2020 at 04:43:12PM +0200, Uros Bizjak wrote:
> > Move the big inline assembly block from nested_vmx_check_vmentry_hw
> > to the vmenter.S assembly file, taking into account all ABI requirements.
> >
> > The new function is modelled after __vmx_vcpu_run, and also calls
> > vmx_update_host_rsp instead of open-coding the function in assembly.
>
> Is there a specific motivation for this change?  The inline asm is ugly, but
> it's contained.

The motivation was mostly the removal of the open-coded asm implementation
of vmx_update_host_rsp, with its corresponding asm arguments. Later, I
also noticed that the resulting asm is surprisingly similar to
__vmx_vcpu_run, so I thought it would be nice to put the whole
low-level function into vmenter.S.
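
(For reference, the C helper the new asm calls is, IIRC:

	void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

so the asm just passes the adjusted RSP instead of open-coding the VMWRITE
and the host_state.rsp bookkeeping.)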
>
> If we really want to get rid of the inline asm, I'd probably vote to simply
> use __vmx_vcpu_run() instead of adding another assembly helper.  The (double)
> GPR save/restore is wasteful, but this flow is basically anti-performance
> anyways.  Outside of KVM developers, I doubt anyone actually enables this path.

If this is the case, then the removal of a bunch of bytes at the
expense of a few extra cycles is a good deal.

Uros.

> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > ---
> >  arch/x86/kvm/vmx/nested.c  | 32 +++-----------------------------
> >  arch/x86/kvm/vmx/vmenter.S | 36 ++++++++++++++++++++++++++++++++++++
> >  2 files changed, 39 insertions(+), 29 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > index 1bb6b31eb646..7b26e983e31c 100644
> > --- a/arch/x86/kvm/vmx/nested.c
> > +++ b/arch/x86/kvm/vmx/nested.c
> > @@ -3012,6 +3012,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
> >       return 0;
> >  }
> >
> > +bool __nested_vmx_check_vmentry_hw(struct vcpu_vmx *vmx, bool launched);
> > +
> >  static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
> >  {
> >       struct vcpu_vmx *vmx = to_vmx(vcpu);
> > @@ -3050,35 +3052,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
> >               vmx->loaded_vmcs->host_state.cr4 = cr4;
> >       }
> >
> > -     asm(
> > -             "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
> > -             "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
> > -             "je 1f \n\t"
> > -             __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
> > -             "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
> > -             "1: \n\t"
> > -             "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
> > -
> > -             /* Check if vmlaunch or vmresume is needed */
> > -             "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
> > -
> > -             /*
> > -              * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
> > -              * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
> > -              * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
> > -              * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
> > -              */
> > -             "call vmx_vmenter\n\t"
> > -
> > -             CC_SET(be)
> > -           : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
> > -           : [HOST_RSP]"r"((unsigned long)HOST_RSP),
> > -             [loaded_vmcs]"r"(vmx->loaded_vmcs),
> > -             [launched]"i"(offsetof(struct loaded_vmcs, launched)),
> > -             [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
> > -             [wordsize]"i"(sizeof(ulong))
> > -           : "memory"
> > -     );
> > +     vm_fail = __nested_vmx_check_vmentry_hw(vmx, vmx->loaded_vmcs->launched);
> >
> >       if (vmx->msr_autoload.host.nr)
> >               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
> > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > index 799db084a336..9fdcbd9320dc 100644
> > --- a/arch/x86/kvm/vmx/vmenter.S
> > +++ b/arch/x86/kvm/vmx/vmenter.S
> > @@ -234,6 +234,42 @@ SYM_FUNC_START(__vmx_vcpu_run)
> >       jmp 1b
> >  SYM_FUNC_END(__vmx_vcpu_run)
> >
> > +/**
> > + * __nested_vmx_check_vmentry_hw - Run a vCPU via a transition to
> > + *                              a nested VMX guest mode
>
> This function comment is incorrect: this helper doesn't run the vCPU, it
> simply executes VMLAUNCH or VMRESUME, which is expected to fail (and we're
> in BUG_ON territory if it doesn't).
>
> > + * @vmx:     struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
> > + * @launched:        %true if the VMCS has been launched
> > + *
> > + * Returns:
> > + *   0 on VM-Exit, 1 on VM-Fail
> > + */
> > +SYM_FUNC_START(__nested_vmx_check_vmentry_hw)
> > +     push %_ASM_BP
> > +     mov  %_ASM_SP, %_ASM_BP
> > +
> > +     push %_ASM_BX
> > +
> > +     /* Copy @launched to BL, _ASM_ARG2 is volatile. */
> > +     mov %_ASM_ARG2B, %bl
> > +
> > +     /* Adjust RSP to account for the CALL to vmx_vmenter(). */
> > +     lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
> > +     call vmx_update_host_rsp
> > +
> > +     /* Check if vmlaunch or vmresume is needed */
> > +     cmpb $0, %bl
> > +
> > +     /* Enter guest mode */
> > +     call vmx_vmenter
> > +
> > +     /* Return 0 on VM-Exit, 1 on VM-Fail */
> > +     setbe %al
> > +
> > +     pop %_ASM_BX
> > +
> > +     pop %_ASM_BP
> > +     ret
> > +SYM_FUNC_END(__nested_vmx_check_vmentry_hw)
> >
> >  .section .text, "ax"
> >
> > --
> > 2.26.2
> >
