On 23/06/2018 15:54, speck for konrad.wilk_at_oracle.com wrote:
> x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if required.
>
> If the module parameter is to flush the L1D cache on every VMENTER
> then we can optimize by using the MSR save list to have the CPU
> poke the MSR with the proper value right at VMENTER boundary.
>
> Signed-off-by: Konrad Rzeszutek Wilk
> ---
> v3: Actually engage the MSR save list
>     Move it to function that frobs VMCS
> ---
>  arch/x86/kvm/vmx.c | 26 +++++++++++++++++++++-----
>  1 file changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 9b18848ccaba..020145adc546 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2649,15 +2649,22 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
>  				      vmx->guest_msrs[i].mask);
>  }
>
> -static void vmx_prepare_guest_switch(struct kvm_vcpu *vcpu)
> +static bool vmx_l1d_cache_flush_req(struct kvm_vcpu *vcpu)
>  {
> -	vmx_save_host_state(vcpu);
> -
>  	if (!enable_ept || static_cpu_has(X86_FEATURE_HYPERVISOR) ||
>  	    !static_cpu_has(X86_BUG_L1TF)) {
>  		vcpu->arch.flush_cache_req = false;

This assignment is strange, the function would be pure if it was not
for it.  Let's remove it, since vmx_vcpu_setup doesn't need it, and
rename the function to vmx_has_bug_l1tf.

Thanks,

Paolo

> -		return;
> +		return false;
>  	}
> +	return true;
> +}
> +
> +static void vmx_prepare_guest_switch(struct kvm_vcpu *vcpu)
> +{
> +	vmx_save_host_state(vcpu);
> +
> +	if (!vmx_l1d_cache_flush_req(vcpu))
> +		return;
>
>  	switch (vmentry_l1d_flush) {
>  	case 0:
> @@ -6352,6 +6359,15 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
>  		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
>  	}
> +
> +	/*
> +	 * If we enforce flushing the L1D cache on every VMENTER lets use the
> +	 * MSR save list.
> +	 */
> +	if (vmx_l1d_cache_flush_req(&vmx->vcpu))
> +		if (vmentry_l1d_flush == 2)
> +			add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD,
> +					      L1D_FLUSH, 0, true);
>  }
>
>  static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> @@ -10079,7 +10095,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
>  		(unsigned long)&current_evmcs->host_rsp : 0;
>
> -	if (vcpu->arch.flush_cache_req)
> +	if (vcpu->arch.flush_cache_req && vmentry_l1d_flush != 1)
>  		kvm_l1d_flush();
>
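For illustration, a minimal sketch of the shape that suggestion implies.
The vmx_has_bug_l1tf name comes from the review comment above; the other
identifiers are taken from the quoted diff, and the surrounding code is
assumed rather than copied from any tree:

    /*
     * Pure predicate, as suggested: no side effects, so both
     * vmx_prepare_guest_switch() and vmx_vcpu_setup() can call it.
     */
    static bool vmx_has_bug_l1tf(void)
    {
    	return enable_ept && !static_cpu_has(X86_FEATURE_HYPERVISOR) &&
    	       static_cpu_has(X86_BUG_L1TF);
    }

    static void vmx_prepare_guest_switch(struct kvm_vcpu *vcpu)
    {
    	vmx_save_host_state(vcpu);

    	/* Only this caller cares about the flag, so clear it here. */
    	if (!vmx_has_bug_l1tf()) {
    		vcpu->arch.flush_cache_req = false;
    		return;
    	}

    	/* ... vmentry_l1d_flush handling as in the posted patch ... */
    }

With the assignment moved out of the helper, vmx_vcpu_setup() could call
the same predicate without side effects before deciding whether to add
MSR_IA32_FLUSH_CMD to the MSR save list.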