> On Sep 3, 2019, at 5:16 PM, speck for Pawan Gupta wrote:
>
> From: Pawan Gupta
> Subject: [PATCH v4 06/10] x86/speculation/taa: Add mitigation for TSX Async
>  Abort
>
> TSX Async Abort (TAA) is a side channel attack on internal buffers in
> some Intel processors, similar to Microarchitectural Data Sampling
> (MDS). In this case certain loads may speculatively pass invalid data
> to dependent operations when an asynchronous abort condition is
> pending in a TSX transaction. This includes loads with no fault or
> assist condition. Such loads may speculatively expose stale data from
> the uarch data structures as in MDS. The scope of exposure is
> same-thread and cross-thread. This issue affects all current
> processors that support TSX.
>
> On CPUs which have their IA32_ARCH_CAPABILITIES MSR bit MDS_NO=0, and
> where the MDS mitigation is already clearing the CPU buffers using
> VERW, no additional mitigation is needed for TAA.

Could you please explicitly state that for the processors that enumerate
MD_CLEAR and use the VERW instruction or the L1D_FLUSH command to
mitigate MDS, no additional mitigation is required. Thanks.

>
> On affected CPUs with MDS_NO=1 this issue can be mitigated by
> disabling the Transactional Synchronization Extensions (TSX) feature.
> A new MSR, IA32_TSX_CTRL, available on future processors and on
> current processors after a microcode update, can be used to control
> the TSX feature. The TSX_CTRL_RTM_DISABLE bit disables the TSX
> sub-feature Restricted Transactional Memory (RTM). The
> TSX_CTRL_CPUID_CLEAR bit clears the RTM enumeration in CPUID. The
> other TSX sub-feature, Hardware Lock Elision (HLE), is unconditionally
> disabled but still enumerated as present by CPUID(EAX=7).EBX{bit4}.
>
> The second mitigation approach is the same as for MDS: clear the
> affected CPU buffers on return to user space and when entering a
> guest. The relevant microcode update is required for this mitigation
> to work. More details on this approach can be found here:
> https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html
>
> The TSX feature can be controlled by the "tsx" command line parameter.
> If TSX is forced on, then "Clear CPU buffers" (the MDS mitigation) is
> deployed. The effective mitigation state can be read from sysfs.
>
> Signed-off-by: Pawan Gupta
> ---
>  arch/x86/include/asm/cpufeatures.h   |   1 +
>  arch/x86/include/asm/msr-index.h     |   4 +
>  arch/x86/include/asm/nospec-branch.h |   4 +-
>  arch/x86/include/asm/processor.h     |   7 ++
>  arch/x86/kernel/cpu/bugs.c           | 110 ++++++++++++++++++++++++++-
>  arch/x86/kernel/cpu/common.c         |   3 +
>  6 files changed, 125 insertions(+), 4 deletions(-)
>
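An aside for reviewers following along: the IA32_TSX_CTRL programming
itself is not part of this patch, and MSR_IA32_TSX_CTRL / TSX_CTRL_*
are not defined here either (they are referenced in the commit message
and presumably added elsewhere in the series). A minimal sketch of how
the two control bits described above would be used, illustrative only
and assuming those definitions exist:

    /* Illustrative sketch, not part of this patch. */
    static void tsx_disable_sketch(void)
    {
            u64 tsx;

            rdmsrl(MSR_IA32_TSX_CTRL, tsx);
            /* Force all RTM transactions to abort ... */
            tsx |= TSX_CTRL_RTM_DISABLE;
            /* ... and hide the RTM feature flag from CPUID enumeration. */
            tsx |= TSX_CTRL_CPUID_CLEAR;
            wrmsrl(MSR_IA32_TSX_CTRL, tsx);
    }
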
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index e880f2408e29..138512ecc975 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -397,5 +397,6 @@
> #define X86_BUG_MDS             X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
> #define X86_BUG_MSBDS_ONLY      X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
> #define X86_BUG_SWAPGS          X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
> +#define X86_BUG_TAA             X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
>
> #endif /* _ASM_X86_CPUFEATURES_H */
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 9163eb67962e..506056cb4db8 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -94,6 +94,10 @@
>                                           * Sampling (MDS) vulnerabilities.
>                                           */
> #define ARCH_CAP_TSX_CTRL_MSR   BIT(7)   /* MSR for TSX control is available. */
> +#define ARCH_CAP_TAA_NO         BIT(8)   /*
> +                                          * Not susceptible to
> +                                          * TSX Async Abort (TAA) vulnerabilities.
> +                                          */
>
> #define MSR_IA32_FLUSH_CMD      0x0000010b
> #define L1D_FLUSH               BIT(0)   /*
> diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
> index dbbf22be900b..def152e0cf42 100644
> --- a/arch/x86/include/asm/nospec-branch.h
> +++ b/arch/x86/include/asm/nospec-branch.h
> @@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(verw_idle_clear);
> #include <asm/segment.h>
>
> /**
> - * verw_clear_cpu_buffers - Mitigation for MDS vulnerability
> + * verw_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
>  *
>  * This uses the otherwise unused and obsolete VERW instruction in
>  * combination with microcode which triggers a CPU buffer flush when the
> @@ -337,7 +337,7 @@ static inline void verw_clear_cpu_buffers(void)
> }
>
> /**
> - * verw_user_clear_cpu_buffers - Mitigation for MDS vulnerability
> + * verw_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
>  *
>  * Clear CPU buffers if the corresponding static key is enabled
>  */
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 6e0a3b43d027..999b85039128 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -988,4 +988,11 @@ enum mds_mitigations {
>         MDS_MITIGATION_VMWERV,
> };
>
> +enum taa_mitigations {
> +       TAA_MITIGATION_OFF,
> +       TAA_MITIGATION_UCODE_NEEDED,
> +       TAA_MITIGATION_VERW,
> +       TAA_MITIGATION_TSX_DISABLE,
> +};
> +
> #endif /* _ASM_X86_PROCESSOR_H */
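A testing aside, not a change request: when checking the new
ARCH_CAP_TAA_NO bit on real hardware, the quickest way is to read
IA32_ARCH_CAPABILITIES (MSR 0x10a) from user space through the msr
driver. The snippet below is only an illustration, assuming the bit
layout above plus the existing ARCH_CAP_MDS_NO definition (bit 5); it
needs root and the msr module loaded:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t cap = 0;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* IA32_ARCH_CAPABILITIES is MSR 0x10a; the pread() offset selects the MSR. */
            if (fd < 0 || pread(fd, &cap, sizeof(cap), 0x10a) != sizeof(cap)) {
                    perror("IA32_ARCH_CAPABILITIES");
                    return 1;
            }
            printf("MDS_NO=%d TSX_CTRL=%d TAA_NO=%d\n",
                   (int)(cap >> 5) & 1, (int)(cap >> 7) & 1, (int)(cap >> 8) & 1);
            close(fd);
            return 0;
    }
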
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index 3bb8564da271..1c0e670c3262 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
> static void __init ssb_select_mitigation(void);
> static void __init l1tf_select_mitigation(void);
> static void __init mds_select_mitigation(void);
> +static void __init taa_select_mitigation(void);
>
> /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
> u64 x86_spec_ctrl_base;
> @@ -105,6 +106,7 @@ void __init check_bugs(void)
>         ssb_select_mitigation();
>         l1tf_select_mitigation();
>         mds_select_mitigation();
> +       taa_select_mitigation();
>
>         arch_smt_update();
>
> @@ -268,6 +270,94 @@ static int __init mds_cmdline(char *str)
> }
> early_param("mds", mds_cmdline);
>
> +#undef pr_fmt
> +#define pr_fmt(fmt)     "TAA: " fmt
> +
> +/* Default mitigation for TAA-affected CPUs */
> +static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
> +static bool taa_nosmt __ro_after_init;
> +
> +static const char * const taa_strings[] = {
> +       [TAA_MITIGATION_OFF]            = "Vulnerable",
> +       [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
> +       [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
> +       [TAA_MITIGATION_TSX_DISABLE]    = "Mitigation: TSX disabled",
> +};
> +
> +static void __init taa_select_mitigation(void)
> +{
> +       u64 ia32_cap = 0;
> +
> +       /*
> +        * Turn off TAA mitigation if X86_BUG_TAA was not set during arch setup
> +        * or the global mitigation switch is off.
> +        */
> +       if (!boot_cpu_has_bug(X86_BUG_TAA) || cpu_mitigations_off()) {
> +               taa_mitigation = TAA_MITIGATION_OFF;
> +               return;
> +       }
> +
> +       if (taa_mitigation == TAA_MITIGATION_OFF) {
> +               pr_info("%s\n", taa_strings[taa_mitigation]);
> +               return;
> +       }
> +
> +       /*
> +        * TSX is supported by the hardware but was disabled during boot,
> +        * select TSX_DISABLE as mitigation.
> +        */
> +       if (!boot_cpu_has(X86_FEATURE_RTM)) {
> +               taa_mitigation = TAA_MITIGATION_TSX_DISABLE;
> +               pr_info("%s\n", taa_strings[taa_mitigation]);
> +               return;
> +       }
> +
> +       if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
> +               taa_mitigation = TAA_MITIGATION_VERW;
> +       else
> +               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
> +
> +       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
> +               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
> +
> +       /*
> +        * If CPU is not vulnerable to MDS, and TSX control is not supported,
> +        * microcode update is required.
> +        */
> +       if ((ia32_cap & ARCH_CAP_MDS_NO) &&
> +           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
> +               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
> +
> +       /* Enable VERW static branch for CPU buffer clearing */
> +       static_branch_enable(&verw_user_clear);
> +
> +       if (taa_nosmt || cpu_mitigations_auto_nosmt())
> +               cpu_smt_disable(false);
> +
> +       pr_info("%s\n", taa_strings[taa_mitigation]);
> +}
> +
> +static int __init taa_cmdline(char *str)
> +{
> +       if (!boot_cpu_has_bug(X86_BUG_TAA))
> +               return 0;
> +
> +       if (!str)
> +               return -EINVAL;
> +
> +       if (!strcmp(str, "off")) {
> +               taa_mitigation = TAA_MITIGATION_OFF;
> +       } else if (!strcmp(str, "full")) {
> +               taa_mitigation = TAA_MITIGATION_VERW;
> +       } else if (!strcmp(str, "full,nosmt")) {
> +               taa_mitigation = TAA_MITIGATION_VERW;
> +               taa_nosmt = true;
> +       }
> +
> +       return 0;
> +}
> +early_param("taa", taa_cmdline);
> +
> #undef pr_fmt
> #define pr_fmt(fmt) "Spectre V1 : " fmt
>
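The commit message says the effective mitigation state can be read from
sysfs, but the sysfs plumbing is not in this patch (presumably a later
patch in the series wires it up). Purely to illustrate how taa_strings[]
above would be consumed there, a minimal sketch could look like the
following; the function name is made up here, and the real version would
likely also report SMT state:

    /* Hypothetical sketch, not part of this patch. */
    static ssize_t taa_show_state(char *buf)
    {
            return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
    }
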
> @@ -765,7 +855,7 @@ static void update_indir_branch_cond(void)
> #undef pr_fmt
> #define pr_fmt(fmt) fmt
>
> -/* Update the static key controlling the MDS CPU buffer clear in idle */
> +/* Update the static key controlling the MDS and TAA CPU buffer clear in idle */
> static void update_verw_branch_idle(void)
> {
>         /*
> @@ -775,8 +865,11 @@ static void update_verw_branch_idle(void)
>          * The other variants cannot be mitigated when SMT is enabled, so
>          * clearing the buffers on idle just to prevent the Store Buffer
>          * repartitioning leak would be a window dressing exercise.
> +        *
> +        * Apply idle buffer clearing to TAA affected CPUs also.
>          */
> -       if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
> +       if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY) &&
> +           !boot_cpu_has_bug(X86_BUG_TAA))
>                 return;
>
>         if (sched_smt_active())
> @@ -786,6 +879,7 @@ static void update_verw_branch_idle(void)
> }
>
> #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
> +#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/taa.html for more details.\n"
>
> void arch_smt_update(void)
> {
> @@ -819,6 +913,18 @@ void arch_smt_update(void)
>                 break;
>         }
>
> +       switch (taa_mitigation) {
> +       case TAA_MITIGATION_VERW:
> +       case TAA_MITIGATION_UCODE_NEEDED:
> +               if (sched_smt_active())
> +                       pr_warn_once(TAA_MSG_SMT);
> +               update_verw_branch_idle();
> +               break;
> +       case TAA_MITIGATION_TSX_DISABLE:
> +       case TAA_MITIGATION_OFF:
> +               break;
> +       }
> +
>         mutex_unlock(&spec_ctrl_mutex);
> }
>
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index f125bf7ecb6f..1b24eca685d7 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -1120,6 +1120,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
>         if (!cpu_matches(NO_SWAPGS))
>                 setup_force_cpu_bug(X86_BUG_SWAPGS);
>
> +       if (!(ia32_cap & ARCH_CAP_TAA_NO) && boot_cpu_has(X86_FEATURE_RTM))
> +               setup_force_cpu_bug(X86_BUG_TAA);
> +
>         if (cpu_matches(NO_MELTDOWN))
>                 return;
>
> --
> 2.20.1
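One last usage note, mostly for the eventual kernel-parameters
documentation: taa_cmdline() above accepts three values on the kernel
command line, for example

    taa=off          - no TAA mitigation
    taa=full         - clear CPU buffers via VERW (the built-in default)
    taa=full,nosmt   - clear CPU buffers and additionally disable SMT

Any other value is silently ignored and the compile-time default
(TAA_MITIGATION_VERW) is kept.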