All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] SVM: re-work VMCB sync-ing
@ 2018-04-26 12:01 Jan Beulich
  2018-04-26 12:43 ` Andrew Cooper
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2018-04-26 12:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Juergen Gross, Andrew Cooper, Boris Ostrovsky, Razvan Cojocaru,
	Suravee Suthikulpanit

While the main problem to be addressed here is the issue of what so far
was named "vmcb_in_sync" starting out with the wrong value (should have
been true instead of false, to prevent performing a VMSAVE without ever
having VMLOADed the vCPU's state), go a step further and make the
sync-ed state a tristate: CPU and memory may be in sync or an update
may be required in either direction. Rename the field and introduce an
enum. Callers of svm_sync_vmcb() now indicate the intended new state.
With that, there's no need to VMLOAD the state perhaps multiple times;
all that's needed is loading it once before VM entry.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I've been considering putting the VMLOAD invocation in
svm_asid_handle_vmrun() (instead of the two copies in svm_do_resume()
and svm_vmexit_handler()), but that seemed a little too abusive of the
function. Perhaps a more general helper function would be wanted, but
perhaps that's something to be done when we've decided whether to put
both VMSAVE and VMLOAD inside the CLGI protected region.
I'm also not really certain about svm_vmexit_do_vmload(): All I'm doing
here is a 1:1 change from previous behavior, but I'm unconvinced this
was/is really correct.

--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -112,7 +112,6 @@ UNLIKELY_END(svm_trace)
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov  VCPU_svm_vmcb(%rbx),%rcx
-        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
         mov  VMCB_rax(%rcx),%rax
         mov  %rax,UREGS_rax(%rsp)
         mov  VMCB_rip(%rcx),%rax
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -680,16 +680,15 @@ static void svm_cpuid_policy_changed(str
                       cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
 }
 
-static void svm_sync_vmcb(struct vcpu *v)
+static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
 
-    if ( arch_svm->vmcb_in_sync )
-        return;
-
-    arch_svm->vmcb_in_sync = 1;
+    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
+        svm_vmsave(arch_svm->vmcb);
 
-    svm_vmsave(arch_svm->vmcb);
+    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
+        arch_svm->vmcb_sync_state = new_state;
 }
 
 static unsigned int svm_get_cpl(struct vcpu *v)
@@ -707,7 +706,7 @@ static void svm_get_segment_register(str
     switch ( seg )
     {
     case x86_seg_fs ... x86_seg_gs:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
 
         /* Fallthrough. */
     case x86_seg_es ... x86_seg_ds:
@@ -718,7 +717,7 @@ static void svm_get_segment_register(str
         break;
 
     case x86_seg_tr:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         *reg = vmcb->tr;
         break;
 
@@ -731,7 +730,7 @@ static void svm_get_segment_register(str
         break;
 
     case x86_seg_ldtr:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         *reg = vmcb->ldtr;
         break;
 
@@ -746,7 +745,6 @@ static void svm_set_segment_register(str
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    bool sync = false;
 
     ASSERT((v == current) || !vcpu_runnable(v));
 
@@ -768,7 +766,8 @@ static void svm_set_segment_register(str
     case x86_seg_gs:
     case x86_seg_tr:
     case x86_seg_ldtr:
-        sync = (v == current);
+        if ( v == current )
+            svm_sync_vmcb(v, vmcb_needs_vmload);
         break;
 
     default:
@@ -777,9 +776,6 @@ static void svm_set_segment_register(str
         return;
     }
 
-    if ( sync )
-        svm_sync_vmcb(v);
-
     switch ( seg )
     {
     case x86_seg_ss:
@@ -813,9 +809,6 @@ static void svm_set_segment_register(str
         ASSERT_UNREACHABLE();
         break;
     }
-
-    if ( sync )
-        svm_vmload(vmcb);
 }
 
 static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
@@ -1086,7 +1079,7 @@ static void svm_ctxt_switch_from(struct
     svm_lwp_save(v);
     svm_tsc_ratio_save(v);
 
-    svm_sync_vmcb(v);
+    svm_sync_vmcb(v, vmcb_needs_vmload);
     svm_vmload_pa(per_cpu(host_vmcb, cpu));
 
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1114,7 +1107,6 @@ static void svm_ctxt_switch_to(struct vc
     svm_restore_dr(v);
 
     svm_vmsave_pa(per_cpu(host_vmcb, cpu));
-    svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
@@ -1168,6 +1160,12 @@ static void noreturn svm_do_resume(struc
 
     hvm_do_resume(v);
 
+    if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
+    {
+        svm_vmload(vmcb);
+        v->arch.hvm_svm.vmcb_sync_state = vmcb_in_sync;
+    }
+
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
@@ -1895,7 +1893,7 @@ static int svm_msr_read_intercept(unsign
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         break;
     }
 
@@ -2067,7 +2065,6 @@ static int svm_msr_write_intercept(unsig
     int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    bool sync = false;
 
     switch ( msr )
     {
@@ -2081,13 +2078,10 @@ static int svm_msr_write_intercept(unsig
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        sync = true;
+        svm_sync_vmcb(v, vmcb_needs_vmload);
         break;
     }
 
-    if ( sync )
-        svm_sync_vmcb(v);
-
     switch ( msr )
     {
     case MSR_IA32_SYSENTER_ESP:
@@ -2261,9 +2255,6 @@ static int svm_msr_write_intercept(unsig
         break;
     }
 
-    if ( sync )
-        svm_vmload(vmcb);
-
     return result;
 
  gpf:
@@ -2413,7 +2404,7 @@ svm_vmexit_do_vmload(struct vmcb_struct
     put_page(page);
 
     /* State in L1 VMCB is stale now */
-    v->arch.hvm_svm.vmcb_in_sync = 0;
+    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
 
     __update_guest_eip(regs, inst_len);
 }
@@ -2623,6 +2614,7 @@ void svm_vmexit_handler(struct cpu_user_
     bool_t vcpu_guestmode = 0;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
+    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
     hvm_invalidate_regs_fields(regs);
 
     if ( paging_mode_hap(v->domain) )
@@ -3109,6 +3101,12 @@ void svm_vmexit_handler(struct cpu_user_
     }
 
   out:
+    if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
+    {
+        svm_vmload(vmcb);
+        v->arch.hvm_svm.vmcb_sync_state = vmcb_in_sync;
+    }
+
     if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
         return;
 
@@ -3117,6 +3115,7 @@ void svm_vmexit_handler(struct cpu_user_
     intr.fields.tpr =
         (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
     vmcb_set_vintr(vmcb, intr);
+    ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
 }
 
 void svm_trace_vmentry(void)
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -84,6 +84,8 @@ static int construct_vmcb(struct vcpu *v
                              CR_INTERCEPT_CR8_READ |
                              CR_INTERCEPT_CR8_WRITE);
 
+    arch_svm->vmcb_sync_state = vmcb_needs_vmload;
+
     /* I/O and MSR permission bitmaps. */
     arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
     if ( arch_svm->msrpm == NULL )
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -102,7 +102,6 @@ void __dummy__(void)
 
     OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
     OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
-    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -495,12 +495,19 @@ struct vmcb_struct {
 struct svm_domain {
 };
 
+enum vmcb_sync_state {
+    vmcb_in_sync,
+    vmcb_needs_vmsave,    /* VMCB out of sync (VMSAVE needed)? */
+    vmcb_needs_vmload     /* VMCB dirty (VMLOAD needed)? */
+};
+
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64    vmcb_pa;
     unsigned long *msrpm;
     int    launch_core;
-    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
+
+    uint8_t vmcb_sync_state; /* enum vmcb_sync_state */
 
     /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
     uint8_t cached_insn_len; /* Zero if no cached instruction. */


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 12:01 [PATCH] SVM: re-work VMCB sync-ing Jan Beulich
@ 2018-04-26 12:43 ` Andrew Cooper
  2018-04-26 13:33   ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: Andrew Cooper @ 2018-04-26 12:43 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Juergen Gross, Boris Ostrovsky, Razvan Cojocaru, Suravee Suthikulpanit

On 26/04/18 13:01, Jan Beulich wrote:
> While the main problem to be addressed here is the issue of what so far
> was named "vmcb_in_sync" starting out with the wrong value (should have
> been true instead of false, to prevent performing a VMSAVE without ever
> having VMLOADed the vCPU's state), go a step further and make the
> sync-ed state a tristate: CPU and memory may be in sync or an update
> may be required in either direction. Rename the field and introduce an
> enum. Callers of svm_sync_vmcb() now indicate the intended new state.
> With that, there's no need to VMLOAD the state perhaps multiple times;
> all that's needed is loading it once before VM entry.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> I've been considering to put the VMLOAD invocation in
> svm_asid_handle_vmrun() (instead of the two copies in svm_do_resume()
> and svm_vmexit_handler()), but that seemed a little too abusive of the
> function. Perhaps a more general helper function would be wanted, but
> perhaps that's something to be done when we've decided whether to put
> both VMSAVE and VMLOAD inside the CLGI protected region.
> I'm also not really certain about svm_vmexit_do_vmload(): All I'm doing
> here is a 1:1 change from previous behavior, but I'm unconvinced this
> was/is really correct.

FWIW, In my not-yet-complete patch for the issue, I'd gone with

@@ -90,7 +91,14 @@ UNLIKELY_END(svm_trace)
         pop  %r13
         pop  %r12
         pop  %rbp
+
         mov  VCPU_svm_vmcb_pa(%rbx),%rax
+        cmpb $0, VCPU_svm_vmload_needed(%rbx)
+        je  1f
+        VMLOAD
+        movb $0, VCPU_svm_vmload_needed(%rbx)
+1:
+
         pop  %rbx
         pop  %r11
         pop  %r10

(albeit with a slightly different state structure) and was considering
putting the VMLOAD in an unlikely section to get the static prediction
the correct way around.

> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -680,16 +680,15 @@ static void svm_cpuid_policy_changed(str
>                        cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
>  }
>  
> -static void svm_sync_vmcb(struct vcpu *v)
> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>  {
>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>  
> -    if ( arch_svm->vmcb_in_sync )
> -        return;
> -
> -    arch_svm->vmcb_in_sync = 1;
> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
> +        svm_vmsave(arch_svm->vmcb);
>  
> -    svm_vmsave(arch_svm->vmcb);
> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
> +        arch_svm->vmcb_sync_state = new_state;

This is slightly awkward for a couple of reasons.  First, passing
vmcb_in_sync in forgets the fact that a vmload is needed.

In my patch, I introduced svm_sync_vmcb_for_update(), rather than
requiring a parameter to be passed in.  I think this is a better API,
and it shrinks the size of the patch.

Also, we currently need segment register codepaths to work from
non-current context, and will very shortly want the MSR paths to do the
same.

By putting an ASSERT(v == current) beside the
svm_vmsave(arch_svm->vmcb), we can drop the (v == current) check in the
callers, and catch corner cases where state isn't in sync for a remote
update.

> @@ -1086,7 +1079,7 @@ static void svm_ctxt_switch_from(struct
>      svm_lwp_save(v);
>      svm_tsc_ratio_save(v);
>  
> -    svm_sync_vmcb(v);
> +    svm_sync_vmcb(v, vmcb_needs_vmload);
>      svm_vmload_pa(per_cpu(host_vmcb, cpu));
>  
>      /* Resume use of ISTs now that the host TR is reinstated. */
> @@ -1114,7 +1107,6 @@ static void svm_ctxt_switch_to(struct vc
>      svm_restore_dr(v);
>  
>      svm_vmsave_pa(per_cpu(host_vmcb, cpu));

As an observation, this vmsave isn't needed.  All state in host_vmcb is
either constant after the AP bringup-path, or loaded properly in the PV
ctxt_switch_to() path.

This can be some future perf improvements though.

> -    svm_vmload(vmcb);
>      vmcb->cleanbits.bytes = 0;
>      svm_lwp_load(v);
>      svm_tsc_ratio_load(v);
> --- a/xen/include/asm-x86/hvm/svm/vmcb.h
> +++ b/xen/include/asm-x86/hvm/svm/vmcb.h
> @@ -495,12 +495,19 @@ struct vmcb_struct {
>  struct svm_domain {
>  };
>  
> +enum vmcb_sync_state {
> +    vmcb_in_sync,
> +    vmcb_needs_vmsave,    /* VMCB out of sync (VMSAVE needed)? */
> +    vmcb_needs_vmload     /* VMCB dirty (VMLOAD needed)? */
> +};

If I were you, I'd go with

enum vmcb_sync_state {
    vmcb_needs_vmsave,
    vmcb_in_sync,
    vmcb_needs_vmload,
};

So it numerically follows the logical progression.

> +
>  struct arch_svm_struct {
>      struct vmcb_struct *vmcb;
>      u64    vmcb_pa;
>      unsigned long *msrpm;
>      int    launch_core;
> -    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
> +

/*
 * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state.
 * Therefore, guest state is in the hardware registers when servicing a
 * VMExit.
 *
 * Immediately after a VMExit, the vmcb is stale, and needs to be brought
 * into sync by VMSAVE.  If state in the vmcb is modified, a VMLOAD is
 * needed before the following VMRUN.
 */

~Andrew

> +    uint8_t vmcb_sync_state; /* enum vmcb_sync_state */
>  
>      /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
>      uint8_t cached_insn_len; /* Zero if no cached instruction. */
>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 12:43 ` Andrew Cooper
@ 2018-04-26 13:33   ` Jan Beulich
  2018-04-26 15:20     ` Boris Ostrovsky
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2018-04-26 13:33 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Juergen Gross, xen-devel, Boris Ostrovsky, Razvan Cojocaru,
	Suravee Suthikulpanit

>>> On 26.04.18 at 14:43, <andrew.cooper3@citrix.com> wrote:
> FWIW, In my not-yet-complete patch for the issue, I'd gone with
> 
> @@ -90,7 +91,14 @@ UNLIKELY_END(svm_trace)
>          pop  %r13
>          pop  %r12
>          pop  %rbp
> +
>          mov  VCPU_svm_vmcb_pa(%rbx),%rax
> +        cmpb $0, VCPU_svm_vmload_needed(%rbx)
> +        je  1f
> +        VMLOAD
> +        movb $0, VCPU_svm_vmload_needed(%rbx)
> +1:
> +
>          pop  %rbx
>          pop  %r11
>          pop  %r10
> 
> (albeit with a slightly different state structure) and was considering
> putting the VMLOAD in an unlikely section to get the static prediction
> the correct way around.

Well, I'd really like to avoid putting this in assembly. In fact I've meanwhile
coded up a follow-on patch introducing svm_vmenter_helper(), where
this as well as the register value copying (currently also done in
assembly for no reason) now lives.

>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -680,16 +680,15 @@ static void svm_cpuid_policy_changed(str
>>                        cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
>>  }
>>  
>> -static void svm_sync_vmcb(struct vcpu *v)
>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>  {
>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>  
>> -    if ( arch_svm->vmcb_in_sync )
>> -        return;
>> -
>> -    arch_svm->vmcb_in_sync = 1;
>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>> +        svm_vmsave(arch_svm->vmcb);
>>  
>> -    svm_vmsave(arch_svm->vmcb);
>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>> +        arch_svm->vmcb_sync_state = new_state;
> 
> This is slightly awkward for a couple of reasons.  First, passing
> vmcb_in_sync in forget the fact that a vmload is needed.

Certainly not - that's the purpose of the if() around it.

> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
> requiring a parameter to be passed in.  I think this is a better API,
> and it shrinks the size of the patch.

I'm not convinced of the "better", and even less so of the "shrinks". But
I'll wait to see what the SVM maintainers say.

> Also, we currently need segment register codepaths to work from
> non-current context, and will very shortly want the MSR paths to do the
> same.
> 
> By putting an ASSERT(v == current) beside the
> svm_vmsave(arch_svm->vmcb), we can drop the (v == current) check in the
> callers, and catch corner cases where state isn't in sync for a remote
> update.

That's an option, but imo not a requirement.

>> @@ -1086,7 +1079,7 @@ static void svm_ctxt_switch_from(struct
>>      svm_lwp_save(v);
>>      svm_tsc_ratio_save(v);
>>  
>> -    svm_sync_vmcb(v);
>> +    svm_sync_vmcb(v, vmcb_needs_vmload);
>>      svm_vmload_pa(per_cpu(host_vmcb, cpu));
>>  
>>      /* Resume use of ISTs now that the host TR is reinstated. */
>> @@ -1114,7 +1107,6 @@ static void svm_ctxt_switch_to(struct vc
>>      svm_restore_dr(v);
>>  
>>      svm_vmsave_pa(per_cpu(host_vmcb, cpu));
> 
> As an observation, this vmsave isn't needed.  All state in host_vmcb is
> either constant after the AP bringup-path, or loaded properly in the PV
> ctxt_switch_to() path.
> 
> This can be some future perf improvements though.

Yeah, I was wondering about the need for this, but since it's orthogonal
to the purpose of the patch, I didn't want to touch it here.

>> --- a/xen/include/asm-x86/hvm/svm/vmcb.h
>> +++ b/xen/include/asm-x86/hvm/svm/vmcb.h
>> @@ -495,12 +495,19 @@ struct vmcb_struct {
>>  struct svm_domain {
>>  };
>>  
>> +enum vmcb_sync_state {
>> +    vmcb_in_sync,
>> +    vmcb_needs_vmsave,    /* VMCB out of sync (VMSAVE needed)? */
>> +    vmcb_needs_vmload     /* VMCB dirty (VMLOAD needed)? */
>> +};
> 
> If I were you, I'd go with
> 
> enum vmcb_sync_state {
>     vmcb_needs_vmsave,
>     vmcb_in_sync,
>     vmcb_needs_vmload,
> };
> 
> So it numerically follows the logical progression.

Much depends on what you consider the "beginning" state. It could
equally well be the inverse of what you suggest, taking the initial
VMCB as the first step in the life cycle.

>> +
>>  struct arch_svm_struct {
>>      struct vmcb_struct *vmcb;
>>      u64    vmcb_pa;
>>      unsigned long *msrpm;
>>      int    launch_core;
>> -    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
>> +
> 
> /*
>  * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state.
>  * Therefore, guest state is in the hardware registers when servicing a
>  * VMExit.
>  *
>  * Immediately after a VMExit, the vmcb is stale, and needs to be brought
>  * into sync by VMSAVE.  If state in the vmcb is modified, a VMLOAD is
>  * needed before the following VMRUN.
>  */

Well, yes, I can certainly add this, but if I do, then next to the enum I think.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 13:33   ` Jan Beulich
@ 2018-04-26 15:20     ` Boris Ostrovsky
  2018-04-26 15:55       ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: Boris Ostrovsky @ 2018-04-26 15:20 UTC (permalink / raw)
  To: Jan Beulich, Andrew Cooper
  Cc: Juergen Gross, xen-devel, Razvan Cojocaru, Suravee Suthikulpanit

On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>> -static void svm_sync_vmcb(struct vcpu *v)
>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>  {
>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>  
>>> -    if ( arch_svm->vmcb_in_sync )
>>> -        return;
>>> -
>>> -    arch_svm->vmcb_in_sync = 1;
>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>> +        svm_vmsave(arch_svm->vmcb);
>>>  
>>> -    svm_vmsave(arch_svm->vmcb);
>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>> +        arch_svm->vmcb_sync_state = new_state;
>> This is slightly awkward for a couple of reasons.  First, passing
>> vmcb_in_sync in forget the fact that a vmload is needed.
> Certainly not - that's the purpose of the if() around it.
>
>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>> requiring a parameter to be passed in.  I think this is a better API,
>> and it shrinks the size of the patch.
> I'm not convinced of the "better", and even less so of the "shrinks". But
> I'll wait to see what the SVM maintainers say.


I think a single function is better. In fact, I was wondering whether
svm_vmload() could also be folded into svm_sync_vmcb() since it is also
a syncing operation.

-boris




_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 15:20     ` Boris Ostrovsky
@ 2018-04-26 15:55       ` Jan Beulich
  2018-04-26 17:27         ` Boris Ostrovsky
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2018-04-26 15:55 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Juergen Gross, Andrew Cooper, Razvan Cojocaru,
	Suravee Suthikulpanit, xen-devel

>>> On 26.04.18 at 17:20, <boris.ostrovsky@oracle.com> wrote:
> On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>>> -static void svm_sync_vmcb(struct vcpu *v)
>>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>>  {
>>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>>  
>>>> -    if ( arch_svm->vmcb_in_sync )
>>>> -        return;
>>>> -
>>>> -    arch_svm->vmcb_in_sync = 1;
>>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>>> +        svm_vmsave(arch_svm->vmcb);
>>>>  
>>>> -    svm_vmsave(arch_svm->vmcb);
>>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>>> +        arch_svm->vmcb_sync_state = new_state;
>>> This is slightly awkward for a couple of reasons.  First, passing
>>> vmcb_in_sync in forget the fact that a vmload is needed.
>> Certainly not - that's the purpose of the if() around it.
>>
>>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>>> requiring a parameter to be passed in.  I think this is a better API,
>>> and it shrinks the size of the patch.
>> I'm not convinced of the "better", and even less so of the "shrinks". But
>> I'll wait to see what the SVM maintainers say.
> 
> 
> I think a single function is better. In fact, I was wondering whether
> svm_vmload() could also be folded into svm_sync_vmcb() since it is also
> a syncing operation.

That doesn't look like it would produce a usable interface: How would
you envision the state transition to be specified by the caller? Right
now the intended new state gets passed in, but in your model
vmcb_in_sync could mean either vmload or vmsave is needed. The
two svm_vmload() uses right now would pass that value in addition
to the svm_sync_vmcb() calls already doing so. And the function
couldn't tell what to do from the current state (if it's
vmcb_needs_vmload, a load is only needed in the cases where
svm_vmload() is called right now). Adding a 3rd parameter or a
second enum doesn't look like a good idea either. But maybe I'm not
seeing your intentions; I'm certainly open to (more specific)
suggestions.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 15:55       ` Jan Beulich
@ 2018-04-26 17:27         ` Boris Ostrovsky
  2018-04-27 15:52           ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: Boris Ostrovsky @ 2018-04-26 17:27 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Juergen Gross, Andrew Cooper, Razvan Cojocaru,
	Suravee Suthikulpanit, xen-devel

On 04/26/2018 11:55 AM, Jan Beulich wrote:
>>>> On 26.04.18 at 17:20, <boris.ostrovsky@oracle.com> wrote:
>> On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>>>> -static void svm_sync_vmcb(struct vcpu *v)
>>>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>>>  {
>>>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>>>  
>>>>> -    if ( arch_svm->vmcb_in_sync )
>>>>> -        return;
>>>>> -
>>>>> -    arch_svm->vmcb_in_sync = 1;
>>>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>>>> +        svm_vmsave(arch_svm->vmcb);
>>>>>  
>>>>> -    svm_vmsave(arch_svm->vmcb);
>>>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>>>> +        arch_svm->vmcb_sync_state = new_state;
>>>> This is slightly awkward for a couple of reasons.  First, passing
>>>> vmcb_in_sync in forget the fact that a vmload is needed.
>>> Certainly not - that's the purpose of the if() around it.
>>>
>>>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>>>> requiring a parameter to be passed in.  I think this is a better API,
>>>> and it shrinks the size of the patch.
>>> I'm not convinced of the "better", and even less so of the "shrinks". But
>>> I'll wait to see what the SVM maintainers say.
>>
>> I think a single function is better. In fact, I was wondering whether
>> svm_vmload() could also be folded into svm_sync_vmcb() since it is also
>> a syncing operation.
> That doesn't look like it would produce a usable interface: How would
> you envision the state transition to be specified by the caller? Right
> now the intended new state gets passed in, but in your model
> vmcb_in_sync could mean either vmload or vmsave is needed. The
> two svm_vmload() uses right now would pass that value in addition
> to the svm_sync_vmcb() calls already doing so. And the function
> couldn't tell what to do from the current state (if it's
> vmcb_needs_vmload, a load is only needed in the cases where
> svm_vmload() is called right now). Adding a 3rd parameter or a
> second enum 

I was thinking about another enum value, e.g. sync_to_cpu (and
sync_to_vmcb replacing vmcb_needs_vmsave).

This will allow us to hide the (v->arch.hvm_svm.vmcb_sync_state ==
vmcb_needs_vmload) test.


-boris


> doesn't look like a good idea either. But maybe I'm not
> seeing your intentions; I'm certainly open to (more specific)
> suggestions.
>
> Jan
>
>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-26 17:27         ` Boris Ostrovsky
@ 2018-04-27 15:52           ` Jan Beulich
  2018-04-27 18:29             ` Boris Ostrovsky
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2018-04-27 15:52 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Juergen Gross, Andrew Cooper, Razvan Cojocaru,
	Suravee Suthikulpanit, xen-devel

>>> On 26.04.18 at 19:27, <boris.ostrovsky@oracle.com> wrote:
> On 04/26/2018 11:55 AM, Jan Beulich wrote:
>>>>> On 26.04.18 at 17:20, <boris.ostrovsky@oracle.com> wrote:
>>> On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>>>>> -static void svm_sync_vmcb(struct vcpu *v)
>>>>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>>>>  {
>>>>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>>>>  
>>>>>> -    if ( arch_svm->vmcb_in_sync )
>>>>>> -        return;
>>>>>> -
>>>>>> -    arch_svm->vmcb_in_sync = 1;
>>>>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>>>>> +        svm_vmsave(arch_svm->vmcb);
>>>>>>  
>>>>>> -    svm_vmsave(arch_svm->vmcb);
>>>>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>>>>> +        arch_svm->vmcb_sync_state = new_state;
>>>>> This is slightly awkward for a couple of reasons.  First, passing
>>>>> vmcb_in_sync in forget the fact that a vmload is needed.
>>>> Certainly not - that's the purpose of the if() around it.
>>>>
>>>>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>>>>> requiring a parameter to be passed in.  I think this is a better API,
>>>>> and it shrinks the size of the patch.
>>>> I'm not convinced of the "better", and even less so of the "shrinks". But
>>>> I'll wait to see what the SVM maintainers say.
>>>
>>> I think a single function is better. In fact, I was wondering whether
>>> svm_vmload() could also be folded into svm_sync_vmcb() since it is also
>>> a syncing operation.
>> That doesn't look like it would produce a usable interface: How would
>> you envision the state transition to be specified by the caller? Right
>> now the intended new state gets passed in, but in your model
>> vmcb_in_sync could mean either vmload or vmsave is needed. The
>> two svm_vmload() uses right now would pass that value in addition
>> to the svm_sync_vmcb() calls already doing so. And the function
>> couldn't tell what to do from the current state (if it's
>> vmcb_needs_vmload, a load is only needed in the cases where
>> svm_vmload() is called right now). Adding a 3rd parameter or a
>> second enum 
> 
> I was thinking about another enum value, e.g. sync_to_cpu (and
> sync_to_vmcb replacing vmcb_needs_vmsave).
> 
> This will allow us to hide (v->arch.hvm_svm.vmcb_sync_state ==
> vmcb_needs_vmload) test.

I'm still not entirely clear how you want that to look like. At the example
of svm_get_segment_register() and svm_set_segment_register(), how
would the svm_sync_vmcb() calls look like? I.e. how do you distinguish
the sync_to_vmcb/transition-to-clean case from the
sync_to_vmcb/transition-to-dirty one?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-27 15:52           ` Jan Beulich
@ 2018-04-27 18:29             ` Boris Ostrovsky
  2018-04-30  7:46               ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: Boris Ostrovsky @ 2018-04-27 18:29 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Juergen Gross, Andrew Cooper, Razvan Cojocaru,
	Suravee Suthikulpanit, xen-devel

On 04/27/2018 11:52 AM, Jan Beulich wrote:
>>>> On 26.04.18 at 19:27, <boris.ostrovsky@oracle.com> wrote:
>> On 04/26/2018 11:55 AM, Jan Beulich wrote:
>>>>>> On 26.04.18 at 17:20, <boris.ostrovsky@oracle.com> wrote:
>>>> On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>>>>>> -static void svm_sync_vmcb(struct vcpu *v)
>>>>>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>>>>>  {
>>>>>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>>>>>  
>>>>>>> -    if ( arch_svm->vmcb_in_sync )
>>>>>>> -        return;
>>>>>>> -
>>>>>>> -    arch_svm->vmcb_in_sync = 1;
>>>>>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>>>>>> +        svm_vmsave(arch_svm->vmcb);
>>>>>>>  
>>>>>>> -    svm_vmsave(arch_svm->vmcb);
>>>>>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>>>>>> +        arch_svm->vmcb_sync_state = new_state;
>>>>>> This is slightly awkward for a couple of reasons.  First, passing
>>>>>> vmcb_in_sync in forgets the fact that a vmload is needed.
>>>>> Certainly not - that's the purpose of the if() around it.
>>>>>
>>>>>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>>>>>> requiring a parameter to be passed in.  I think this is a better API,
>>>>>> and it shrinks the size of the patch.
>>>>> I'm not convinced of the "better", and even less so of the "shrinks". But
>>>>> I'll wait to see what the SVM maintainers say.
>>>> I think a single function is better. In fact, I was wondering whether
>>>> svm_vmload() could also be folded into svm_sync_vmcb() since it is also
>>>> a syncing operation.
>>> That doesn't look like it would produce a usable interface: How would
>>> you envision the state transition to be specified by the caller? Right
>>> now the intended new state gets passed in, but in your model
>>> vmcb_in_sync could mean either vmload or vmsave is needed. The
>>> two svm_vmload() uses right now would pass that value in addition
>>> to the svm_sync_vmcb() calls already doing so. And the function
>>> couldn't tell what to do from the current state (if it's
>>> vmcb_needs_vmload, a load is only needed in the cases where
>>> svm_vmload() is called right now). Adding a 3rd parameter or a
>>> second enum 
>> I was thinking about another enum value, e.g. sync_to_cpu (and
>> sync_to_vmcb replacing vmcb_needs_vmsave).
>>
>> This will allow us to hide (v->arch.hvm_svm.vmcb_sync_state ==
>> vmcb_needs_vmload) test.
> I'm still not entirely clear what you want that to look like. At the example
> of svm_get_segment_register() and svm_set_segment_register(), how
> would the svm_sync_vmcb() calls look like? I.e. how do you distinguish
> the sync_to_vmcb/transition-to-clean case from the
> sync_to_vmcb/transition-to-dirty one?


Something like

static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
{
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;

     
    if ( new_state == vmcb_sync_to_cpu )
    {
	if (v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
        {     
            svm_vmload(vmcb);
            v->arch.hvm_svm.vmcb_sync_state = vmcb_in_sync;
        }
	return;
    }

    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
        svm_vmsave(arch_svm->vmcb);
 
    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
        arch_svm->vmcb_sync_state = new_state;
 }


-boris


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] SVM: re-work VMCB sync-ing
  2018-04-27 18:29             ` Boris Ostrovsky
@ 2018-04-30  7:46               ` Jan Beulich
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2018-04-30  7:46 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Juergen Gross, Andrew Cooper, Razvan Cojocaru,
	Suravee Suthikulpanit, xen-devel

>>> On 27.04.18 at 20:29, <boris.ostrovsky@oracle.com> wrote:
> On 04/27/2018 11:52 AM, Jan Beulich wrote:
>>>>> On 26.04.18 at 19:27, <boris.ostrovsky@oracle.com> wrote:
>>> On 04/26/2018 11:55 AM, Jan Beulich wrote:
>>>>>>> On 26.04.18 at 17:20, <boris.ostrovsky@oracle.com> wrote:
>>>>> On 04/26/2018 09:33 AM, Jan Beulich wrote:
>>>>>>>> -static void svm_sync_vmcb(struct vcpu *v)
>>>>>>>> +static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
>>>>>>>>  {
>>>>>>>>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
>>>>>>>>  
>>>>>>>> -    if ( arch_svm->vmcb_in_sync )
>>>>>>>> -        return;
>>>>>>>> -
>>>>>>>> -    arch_svm->vmcb_in_sync = 1;
>>>>>>>> +    if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>>>>>>>> +        svm_vmsave(arch_svm->vmcb);
>>>>>>>>  
>>>>>>>> -    svm_vmsave(arch_svm->vmcb);
>>>>>>>> +    if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>>>>>>>> +        arch_svm->vmcb_sync_state = new_state;
>>>>>>> This is slightly awkward for a couple of reasons.  First, passing
>>>>>>> vmcb_in_sync in forgets the fact that a vmload is needed.
>>>>>> Certainly not - that's the purpose of the if() around it.
>>>>>>
>>>>>>> In my patch, I introduced svm_sync_vmcb_for_update(), rather than
>>>>>>> requiring a parameter to be passed in.  I think this is a better API,
>>>>>>> and it shrinks the size of the patch.
>>>>>> I'm not convinced of the "better", and even less so of the "shrinks". But
>>>>>> I'll wait to see what the SVM maintainers say.
>>>>> I think a single function is better. In fact, I was wondering whether
>>>>> svm_vmload() could also be folded into svm_sync_vmcb() since it is also
>>>>> a syncing operation.
>>>> That doesn't look like it would produce a usable interface: How would
>>>> you envision the state transition to be specified by the caller? Right
>>>> now the intended new state gets passed in, but in your model
>>>> vmcb_in_sync could mean either vmload or vmsave is needed. The
>>>> two svm_vmload() uses right now would pass that value in addition
>>>> to the svm_sync_vmcb() calls already doing so. And the function
>>>> couldn't tell what to do from the current state (if it's
>>>> vmcb_needs_vmload, a load is only needed in the cases where
>>>> svm_vmload() is called right now). Adding a 3rd parameter or a
>>>> second enum 
>>> I was thinking about another enum value, e.g. sync_to_cpu (and
>>> sync_to_vmcb replacing vmcb_needs_vmsave).
>>>
>>> This will allow us to hide (v->arch.hvm_svm.vmcb_sync_state ==
>>> vmcb_needs_vmload) test.
>> I'm still not entirely clear what you want that to look like. At the example
>> of svm_get_segment_register() and svm_set_segment_register(), how
>> would the svm_sync_vmcb() calls look like? I.e. how do you distinguish
>> the sync_to_vmcb/transition-to-clean case from the
>> sync_to_vmcb/transition-to-dirty one?
> 
> 
> Something like
> 
> static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
> {
>      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
> 
>      
>     if ( new_state == vmcb_sync_to_cpu )

But that's not a state, but a transition. An enum should consist of only
states or only transitions. I'll produce a variant of this as v2, but I'm
afraid you won't be overly happy with it.

Jan

>     {
> 	if (v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
>         {     
>             svm_vmload(vmcb);
>             v->arch.hvm_svm.vmcb_sync_state = vmcb_in_sync;
>         }
> 	return;
>     }
> 
>     if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
>         svm_vmsave(arch_svm->vmcb);
>  
>     if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
>         arch_svm->vmcb_sync_state = new_state;
>  }
> 
> 
> -boris





_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2018-04-30  7:46 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-26 12:01 [PATCH] SVM: re-work VMCB sync-ing Jan Beulich
2018-04-26 12:43 ` Andrew Cooper
2018-04-26 13:33   ` Jan Beulich
2018-04-26 15:20     ` Boris Ostrovsky
2018-04-26 15:55       ` Jan Beulich
2018-04-26 17:27         ` Boris Ostrovsky
2018-04-27 15:52           ` Jan Beulich
2018-04-27 18:29             ` Boris Ostrovsky
2018-04-30  7:46               ` Jan Beulich

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.