From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mail.linutronix.de (146.0.238.70:993) by
	crypto-ml.lab.linutronix.de with IMAP4-SSL for ; 12 Jul 2018 14:40:14 -0000
Received: from localhost ([127.0.0.1] helo=nanos.tec.linutronix.de) by
	Galois.linutronix.de with esmtp (Exim 4.80) (envelope-from ) id
	1fdcfu-0006vZ-Qf for speck@linutronix.de; Thu, 12 Jul 2018 16:34:30 +0200
Message-ID: <20180712142957.375130728@linutronix.de>
Date: Thu, 12 Jul 2018 16:19:07 +0200
From: Thomas Gleixner 
References: <20180712141902.576562442@linutronix.de>
Subject: [patch V10 05/10] Control knobs and Documentation 5
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
To: speck@linutronix.de
List-ID: 

Subject: [patch V10 05/10] x86/kvm: Add static key for flush always
From: Thomas Gleixner 

Avoid the conditional in the L1D flush control path.

Signed-off-by: Thomas Gleixner 
---
 arch/x86/kvm/vmx.c |   16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -192,6 +192,7 @@ module_param(ple_window_max, uint, 0444)
 extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
 
 /* Storage for pre module init parameter parsing */
 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
@@ -232,8 +233,12 @@ static int vmx_setup_l1d_flush(enum vmx_
 
 	l1tf_vmx_mitigation = l1tf;
 
-	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
-		static_branch_enable(&vmx_l1d_should_flush);
+	if (l1tf == VMENTER_L1D_FLUSH_NEVER)
+		return 0;
+
+	static_branch_enable(&vmx_l1d_should_flush);
+	if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+		static_branch_enable(&vmx_l1d_flush_always);
 	return 0;
 }
 
@@ -9651,7 +9656,6 @@ static void *vmx_l1d_flush_pages;
 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
-	bool always;
 
 	/*
 	 * This code is only executed when the the flush mode is 'cond' or
@@ -9661,8 +9665,10 @@ static void vmx_l1d_flush(struct kvm_vcp
 	 * it. The flush bit gets set again either from vcpu_run() or from
 	 * one of the unsafe VMEXIT handlers.
 	 */
-	always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
-	vcpu->arch.l1tf_flush_l1d = always;
+	if (static_branch_unlikely(&vmx_l1d_flush_always))
+		vcpu->arch.l1tf_flush_l1d = true;
+	else
+		vcpu->arch.l1tf_flush_l1d = false;
 
 	vcpu->stat.l1d_flush++;
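
[Editorial note, not part of the patch] The change replaces a per-invocation
read and compare of l1tf_vmx_mitigation with a jump-label static key, so the
hot path contains a runtime-patched branch instead of a memory load and
conditional. A minimal sketch of that pattern follows; the demo_* names are
invented for illustration and only the jump_label API calls themselves
(DEFINE_STATIC_KEY_FALSE, static_branch_enable, static_branch_unlikely)
correspond to what the patch uses.

#include <linux/jump_label.h>

/* Key defaults to "false"; the branch below compiles to a NOP until the
 * key is enabled, at which point the kernel patches it into a jump. */
static DEFINE_STATIC_KEY_FALSE(demo_flush_always);

/* Slow path, run once at setup time: pick the mode and patch the branch. */
static void demo_setup(bool always)
{
	if (always)
		static_branch_enable(&demo_flush_always);
}

/* Hot path: no load of a mitigation variable, just the patched branch. */
static bool demo_should_flush(void)
{
	return static_branch_unlikely(&demo_flush_always);
}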