On 13/07/2018 19:28, speck for Konrad Rzeszutek Wilk wrote:
> Perhaps:
>
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 0e75170..f03ec33 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -70,6 +70,7 @@
>  #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
>  #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
>  #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
> +#define ARCH_CAP_SKIP_L1DFL_VMENTRY	(1 << 3)   /* Skip L1DF on VMENTRY */

If this bit is set, KVM is effectively not vulnerable.  I just sent a
more complete follow-up, replacing what I had cooked up last Friday.

Thanks,

Paolo

>  #define ARCH_CAP_SSB_NO			(1 << 4)   /*
> 						    * Not susceptible to Speculative Store Bypass
> 						    * attack, so no Speculative Store Bypass
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c5c0118..5209252 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -216,6 +216,15 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
>  		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
>  		return 0;
>  	}
> +	if (static_cpu_has(X86_FEATURE_HYPERVISOR) &&
> +	    static_cpu_has(X86_FEATURE_FLUSH_L1D) &&
> +	    boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
> +		u64 msr;
> +
> +		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
> +		if (msr & ARCH_CAP_SKIP_L1DFL_VMENTRY)
> +			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NESTED_VM;
> +	}
>
>  	/* If set to auto use the default l1tf mitigation method */
>  	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
>