From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mail.linutronix.de (146.0.238.70:993) by
	crypto-ml.lab.linutronix.de with IMAP4-SSL for ;
	27 Feb 2019 15:37:40 -0000
Received: from localhost ([127.0.0.1] helo=nanos.tec.linutronix.de) by
	Galois.linutronix.de with esmtp (Exim 4.80) (envelope-from ) id
	1gz1Gx-0001aC-RS for speck@linutronix.de; Wed, 27 Feb 2019 16:37:28 +0100
Message-Id: <20190227152037.276549415@linutronix.de>
Date: Wed, 27 Feb 2019 16:09:42 +0100
From: Thomas Gleixner
References: <20190227150939.605235753@linutronix.de>
MIME-Version: 1.0
Subject: [patch V5 03/14] MDS basics 3
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
To: speck@linutronix.de
List-ID: 

Subject: [patch V5 03/14] x86/speculation: Consolidate CPU whitelists
From: Thomas Gleixner

The CPU vulnerability whitelists have some overlap and there are more
whitelists coming along. Use the driver_data field in the x86_cpu_id
struct to denote the whitelisted vulnerabilities and combine all
whitelists into one.

Suggested-by: Linus Torvalds
Signed-off-by: Thomas Gleixner
---
 arch/x86/kernel/cpu/common.c |   96 ++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 51 deletions(-)

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -948,62 +948,57 @@ static void identify_cpu_without_cpuid(s
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
-	{ X86_VENDOR_CENTAUR, 5 },
-	{ X86_VENDOR_INTEL, 5 },
-	{ X86_VENDOR_NSC, 5 },
-	{ X86_VENDOR_ANY, 4 },
+#define NO_SPECULATION	BIT(0)
+#define NO_MELTDOWN	BIT(1)
+#define NO_SSB		BIT(2)
+#define NO_L1TF		BIT(3)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+	{ X86_VENDOR_ANY, 4, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_CENTAUR, 5, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 5, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_NSC, 5, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY, NO_SPECULATION },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY, NO_SPECULATION },
+
+	{ X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ANY, NO_MELTDOWN | NO_L1TF },
+	{ X86_VENDOR_HYGON, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ANY, NO_MELTDOWN | NO_L1TF },
+
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM, X86_FEATURE_ANY, NO_SSB | NO_L1TF },
+
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, X86_FEATURE_ANY, NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, X86_FEATURE_ANY, NO_L1TF },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS, X86_FEATURE_ANY, NO_L1TF },
+
+	{ X86_VENDOR_AMD, 0x0f, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SSB },
+	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SSB },
+	{ X86_VENDOR_AMD, 0x11, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SSB },
+	{ X86_VENDOR_AMD, 0x12, X86_MODEL_ANY, X86_FEATURE_ANY, NO_SSB },
 	{}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-	{ X86_VENDOR_AMD },
-	{ X86_VENDOR_HYGON },
-	{}
-};
-
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
-	{ X86_VENDOR_AMD, 0x12, },
-	{ X86_VENDOR_AMD, 0x11, },
-	{ X86_VENDOR_AMD, 0x10, },
-	{ X86_VENDOR_AMD, 0xf, },
-	{}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-	/* in addition to cpu_no_speculation */
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
-	{}
-};
+	return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_speculation))
+	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1012,15 +1007,14 @@ static void __init cpu_set_bug_bits(stru
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-	if (x86_match_cpu(cpu_no_meltdown))
+	if (cpu_matches(NO_MELTDOWN))
 		return;
 
 	/* Rogue Data Cache Load? No! */
@@ -1029,7 +1023,7 @@ static void __init cpu_set_bug_bits(stru
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-	if (x86_match_cpu(cpu_no_l1tf))
+	if (cpu_matches(NO_L1TF))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_L1TF);
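
For illustration, here is a minimal, self-contained userspace sketch of the
pattern this patch introduces: a single table whose per-entry bitmask (the
driver_data slot) records which vulnerabilities the matching CPUs are not
affected by, plus a helper in the style of cpu_matches() that tests one bit.
The struct cpu_id, match_cpu(), the ANY wildcard and the vendor/family/model
numbers below are simplified stand-ins invented for this example; they are
not the kernel's x86_cpu_id or x86_match_cpu().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1UL << (n))

/* Same flag layout as the patch: one bit per "not affected" vulnerability. */
#define NO_SPECULATION	BIT(0)
#define NO_MELTDOWN	BIT(1)
#define NO_SSB		BIT(2)
#define NO_L1TF		BIT(3)

#define ANY		0xffff	/* wildcard, standing in for X86_*_ANY */

/* Simplified stand-in for struct x86_cpu_id. */
struct cpu_id {
	uint16_t vendor, family, model;
	unsigned long driver_data;	/* whitelist bits for this entry */
};

/* One consolidated table; terminated by an all-zero entry. */
static const struct cpu_id vuln_whitelist[] = {
	{ /* vendor */ 1, /* family */ 5, ANY,  NO_SPECULATION },
	{ /* vendor */ 2, ANY,            ANY,  NO_MELTDOWN | NO_L1TF },
	{ /* vendor */ 1, 6,              0x37, NO_SSB | NO_L1TF },
	{}
};

/* Simplified stand-in for x86_match_cpu(): first entry matching the CPU. */
static const struct cpu_id *match_cpu(const struct cpu_id *table,
				      uint16_t vendor, uint16_t family,
				      uint16_t model)
{
	for (; table->vendor || table->family || table->model ||
	       table->driver_data; table++) {
		if (table->vendor != ANY && table->vendor != vendor)
			continue;
		if (table->family != ANY && table->family != family)
			continue;
		if (table->model != ANY && table->model != model)
			continue;
		return table;
	}
	return NULL;
}

/* Same idea as the patch's cpu_matches(): is bit 'which' set for this CPU? */
static bool cpu_matches(uint16_t vendor, uint16_t family, uint16_t model,
			unsigned long which)
{
	const struct cpu_id *m = match_cpu(vuln_whitelist, vendor, family, model);

	return m && !!(m->driver_data & which);
}

int main(void)
{
	/* The vendor-2 wildcard entry is whitelisted for Meltdown/L1TF ... */
	printf("NO_MELTDOWN: %d\n", cpu_matches(2, 0x17, 0x01, NO_MELTDOWN));
	/* ... but not for SSB, so the SSB bug bit would still be forced. */
	printf("NO_SSB:      %d\n", cpu_matches(2, 0x17, 0x01, NO_SSB));
	return 0;
}

Built with any C compiler, this prints 1 for NO_MELTDOWN and 0 for NO_SSB,
which is the same per-bit decision cpu_set_bug_bits() now makes with a single
table lookup instead of one x86_match_cpu() call per separate whitelist.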