diff options
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
 -rw-r--r--  arch/x86/kernel/cpu/common.c | 52 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c296cb1c0113..4730b0a58f24 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1211,18 +1211,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 					    X86_FEATURE_ANY, issues)
 
 #define SRBDS		BIT(0)
+/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
+#define MMIO		BIT(1)
+/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
+#define MMIO_SBDS	BIT(2)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
+	VULNBL_INTEL_STEPPINGS(HASWELL_X,	BIT(2) | BIT(4),		MMIO),
+	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPINGS(0x3, 0x5),	MMIO),
 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
+	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
 	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	BIT(3) | BIT(4) | BIT(6) |
+						BIT(7) | BIT(0xB),              MMIO),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
 	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0xC),	SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0xD),	SRBDS),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x9, 0xC),	SRBDS | MMIO),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x9, 0xD),	SRBDS | MMIO),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPINGS(0x5, 0x5),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPINGS(0x1, 0x1),	MMIO),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPINGS(0x4, 0x6),	MMIO),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE,	BIT(2) | BIT(3) | BIT(5),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO),
+	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPINGS(0x1, 0x1),	MMIO),
+	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
+	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | MMIO_SBDS),
 	{}
 };
@@ -1243,6 +1267,13 @@ u64 x86_read_arch_cap_msr(void)
 	return ia32_cap;
 }
 
+static bool arch_cap_mmio_immune(u64 ia32_cap)
+{
+	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+		ia32_cap & ARCH_CAP_PSDP_NO &&
+		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = x86_read_arch_cap_msr();
@@ -1296,12 +1327,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	/*
 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
 	 * in the vulnerability blacklist.
+	 *
+	 * Some of the implications and mitigation of Shared Buffers Data
+	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
+	 * SRBDS.
 	 */
 	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
 	     cpu_has(c, X86_FEATURE_RDSEED)) &&
-	    cpu_matches(cpu_vuln_blacklist, SRBDS))
+	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
 		    setup_force_cpu_bug(X86_BUG_SRBDS);
 
+	/*
+	 * Processor MMIO Stale Data bug enumeration
+	 *
+	 * Affected CPU list is generally enough to enumerate the vulnerability,
+	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
+	 * not want the guest to enumerate the bug.
+	 */
+	if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
+	    !arch_cap_mmio_immune(ia32_cap))
+		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;