x86/speculation: Consolidate CPU whitelists

The CPU vulnerability whitelists have some overlap and there are more
whitelists coming along.

Use the driver_data field in the x86_cpu_id struct to denote the
whitelisted vulnerabilities and combine all whitelists into one.
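
As a rough standalone illustration of the pattern (a user-space sketch; struct cpu_id, match_cpu(), the vendor numbers and the ANY wildcard below are simplified stand-ins for the kernel's x86_cpu_id, x86_match_cpu() and flag bits, not the actual definitions): one table whose driver_data field carries per-entry flag bits, queried through a single helper, replaces one table per vulnerability.

	#include <stdbool.h>
	#include <stdio.h>

	#define BIT(n)         (1UL << (n))
	#define NO_SPECULATION BIT(0)
	#define NO_MELTDOWN    BIT(1)

	#define ANY 0xffff                      /* wildcard family/model */

	struct cpu_id {
		unsigned int  vendor, family, model;
		unsigned long driver_data;      /* whitelisted vulnerabilities */
	};

	/* One combined whitelist instead of one table per vulnerability. */
	static const struct cpu_id whitelist[] = {
		{ 1 /* vendor A */, 5,   ANY, NO_SPECULATION },
		{ 2 /* vendor B */, ANY, ANY, NO_MELTDOWN },
		{ 0 }                           /* terminator */
	};

	/* First match wins, as with x86_match_cpu(). */
	static const struct cpu_id *match_cpu(unsigned int vendor,
					      unsigned int family,
					      unsigned int model)
	{
		const struct cpu_id *m;

		for (m = whitelist; m->vendor; m++)
			if (m->vendor == vendor &&
			    (m->family == ANY || m->family == family) &&
			    (m->model == ANY || m->model == model))
				return m;
		return NULL;
	}

	static bool cpu_matches(unsigned int vendor, unsigned int family,
				unsigned int model, unsigned long which)
	{
		const struct cpu_id *m = match_cpu(vendor, family, model);

		return m && !!(m->driver_data & which);
	}

	int main(void)
	{
		/* vendor B is whitelisted for Meltdown ... */
		printf("%d\n", cpu_matches(2, 6, 0x17, NO_MELTDOWN));    /* 1 */
		/* ... but not for speculation as a whole. */
		printf("%d\n", cpu_matches(2, 6, 0x17, NO_SPECULATION)); /* 0 */
		return 0;
	}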

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
Thomas Gleixner 2019-02-27 10:10:23 +01:00
parent d8eabc3731
commit 36ad35131a
1 changed file with 60 additions and 50 deletions

@@ -948,61 +948,72 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_TABLET,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL_MID,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_CENTAUR,	5 },
-	{ X86_VENDOR_INTEL,	5 },
-	{ X86_VENDOR_NSC,	5 },
-	{ X86_VENDOR_ANY,	4 },
+#define NO_SPECULATION	BIT(0)
+#define NO_MELTDOWN	BIT(1)
+#define NO_SSB		BIT(2)
+#define NO_L1TF		BIT(3)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)	\
+	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)	\
+	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)	\
+	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+#define VULNWL_HYGON(family, whitelist)	\
+	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF),
+
+	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
+
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF),
+
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_L1TF),
+
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF),
 	{}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-	{ X86_VENDOR_AMD },
-	{ X86_VENDOR_HYGON },
-	{}
-};
-
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{ X86_VENDOR_AMD,	0x12,					},
-	{ X86_VENDOR_AMD,	0x11,					},
-	{ X86_VENDOR_AMD,	0x10,					},
-	{ X86_VENDOR_AMD,	0xf,					},
-	{}
-};
-
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-	/* in addition to cpu_no_speculation */
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_PLUS	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+	return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_speculation))
+	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1011,15 +1022,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-	if (x86_match_cpu(cpu_no_meltdown))
+	if (cpu_matches(NO_MELTDOWN))
 		return;
 
 	/* Rogue Data Cache Load? No! */
@@ -1028,7 +1038,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-	if (x86_match_cpu(cpu_no_l1tf))
+	if (cpu_matches(NO_L1TF))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_L1TF);
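
One property of the consolidated table is worth noting: x86_match_cpu() returns the first matching entry, which is why the diff keeps the X86_FAMILY_ANY entries at the end ("FAMILY_ANY must be last"). Extending the hypothetical sketch above, a deliberately mis-ordered table shows the failure mode: the wildcard entry shadows the more specific one, so the specific entry's extra bits are never seen.

	/* Extending the sketch above (hypothetical, mis-ordered on purpose). */
	static const struct cpu_id bad_whitelist[] = {
		{ 2 /* vendor B */, ANY,  ANY, NO_MELTDOWN },   /* matches first */
		{ 2 /* vendor B */, 0x0f, ANY, NO_MELTDOWN | NO_SPECULATION },
		{ 0 }                   /* the 0x0f entry is never reached */
	};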