All of lore.kernel.org
 help / color / mirror / Atom feed
* [MODERATED] [PATCH v4 8/8] [PATCH v4 8/8] Linux Patch #8
@ 2018-06-23 13:54 konrad.wilk
  2018-06-25 14:32 ` [MODERATED] " Paolo Bonzini
  2018-06-27 13:05 ` Thomas Gleixner
  0 siblings, 2 replies; 6+ messages in thread
From: konrad.wilk @ 2018-06-23 13:54 UTC (permalink / raw)
  To: speck

If the module parameter is to flush the L1D cache on every VMENTER
then we can optimize by using the MSR save list to have the CPU
poke the MSR with the proper value right at the VMENTER boundary.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
v3: Actually engage the MSR save list
    Move it to function that frobs VMCS
---
 arch/x86/kvm/vmx.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b18848ccaba..020145adc546 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2649,15 +2649,22 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 				   vmx->guest_msrs[i].mask);
 }
 
-static void vmx_prepare_guest_switch(struct kvm_vcpu *vcpu)
+static bool vmx_l1d_cache_flush_req(struct kvm_vcpu *vcpu)
 {
-	vmx_save_host_state(vcpu);
-
 	if (!enable_ept || static_cpu_has(X86_FEATURE_HYPERVISOR) ||
 	    !static_cpu_has(X86_BUG_L1TF)) {
 		vcpu->arch.flush_cache_req = false;
-		return;
+		return false;
 	}
+	return true;
+}
+
+static void vmx_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+	vmx_save_host_state(vcpu);
+
+	if (!vmx_l1d_cache_flush_req(vcpu))
+		return;
 
 	switch (vmentry_l1d_flush) {
 	case 0:
@@ -6352,6 +6359,15 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 	}
+
+	/*
+	 * If we enforce flushing the L1D cache on every VMENTER lets use the
+	 * MSR save list.
+	 */
+	if (vmx_l1d_cache_flush_req(&vmx->vcpu))
+		if (vmentry_l1d_flush == 2)
+			add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD,
+					      L1D_FLUSH, 0, true);
 }
 
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -10079,7 +10095,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
-	if (vcpu->arch.flush_cache_req)
+	if (vcpu->arch.flush_cache_req && vmentry_l1d_flush != 1)
 		kvm_l1d_flush();
 
 	asm(
-- 
2.14.3

^ permalink raw reply related	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-06-28 16:41 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-06-23 13:54 [MODERATED] [PATCH v4 8/8] [PATCH v4 8/8] Linux Patch #8 konrad.wilk
2018-06-25 14:32 ` [MODERATED] " Paolo Bonzini
2018-06-27 13:05 ` Thomas Gleixner
2018-06-27 14:43   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-06-27 17:00     ` Thomas Gleixner
2018-06-28 16:40       ` [MODERATED] " Konrad Rzeszutek Wilk

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.