Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c | 91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9cfca3d7d0e2..8cd4126d8253 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -567,17 +567,18 @@ static __init int setup_disable_pku(char *arg)
 	return 1;
 }
 __setup("nopku", setup_disable_pku);
-#endif /* CONFIG_X86_64 */
+#endif
 
 #ifdef CONFIG_X86_KERNEL_IBT
 
-__noendbr u64 ibt_save(void)
+__noendbr u64 ibt_save(bool disable)
 {
 	u64 msr = 0;
 
 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
 		rdmsrl(MSR_IA32_S_CET, msr);
-		wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
+		if (disable)
+			wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
 	}
 
 	return msr;
@@ -1093,6 +1094,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x8000001f)
 		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
 
+	if (c->extended_cpuid_level >= 0x80000021)
+		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
+
 	init_scattered_cpuid_features(c);
 	init_speculation_control(c);
 
@@ -1226,8 +1230,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
 
 	/* Zhaoxin Family 7 */
 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
@@ -1256,6 +1260,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define MMIO_SBDS	BIT(2)
 /* CPU is affected by RETbleed, speculating where you would not expect it */
 #define RETBLEED	BIT(3)
+/* CPU is affected by SMT (cross-thread) return predictions */
+#define SMT_RSB		BIT(4)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
@@ -1287,8 +1293,8 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 
 	VULNBL_AMD(0x15, RETBLEED),
 	VULNBL_AMD(0x16, RETBLEED),
-	VULNBL_AMD(0x17, RETBLEED),
-	VULNBL_HYGON(0x18, RETBLEED),
+	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
+	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
 	{}
 };
 
@@ -1338,8 +1344,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
-	if (ia32_cap & ARCH_CAP_IBRS_ALL)
+	/*
+	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+	 * flag and protect from vendor-specific bugs via the whitelist.
+	 */
+	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
 	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1401,10 +1415,8 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
-	if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
-	    !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
-		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
+		setup_force_cpu_bug(X86_BUG_SMT_RSB);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
@@ -1682,9 +1694,7 @@ void check_null_seg_clears_base(struct cpuinfo_x86 *c)
 	if (!IS_ENABLED(CONFIG_X86_64))
 		return;
 
-	/* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
-	if (c->extended_cpuid_level >= 0x80000021 &&
-	    cpuid_eax(0x80000021) & BIT(6))
+	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
 		return;
 
 	/*
@@ -1953,13 +1963,13 @@ void __init identify_boot_cpu(void)
 	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
 		pr_info("CET detected: Indirect Branch Tracking enabled\n");
 #ifdef CONFIG_X86_32
-	sysenter_setup();
 	enable_sep_cpu();
 #endif
 	cpu_detect_tlb(&boot_cpu_data);
 	setup_cr_pinning();
 
 	tsx_init();
+	lkgs_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
@@ -2125,7 +2135,6 @@ static void wait_for_master_cpu(int cpu)
 #endif
 }
 
-#ifdef CONFIG_X86_64
 static inline void setup_getcpu(int cpu)
 {
 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
@@ -2147,6 +2156,7 @@ static inline void setup_getcpu(int cpu)
 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
 }
 
+#ifdef CONFIG_X86_64
 static inline void ucode_cpu_init(int cpu)
 {
 	if (cpu)
@@ -2166,8 +2176,6 @@ static inline void tss_setup_ist(struct tss_struct *tss)
 
 #else /* CONFIG_X86_64 */
 
-static inline void setup_getcpu(int cpu) { }
-
 static inline void ucode_cpu_init(int cpu)
 {
 	show_ucode_info_early();
@@ -2297,30 +2305,45 @@ void cpu_init_secondary(void)
 #endif
 
 #ifdef CONFIG_MICROCODE_LATE_LOADING
-/*
+/**
+ * store_cpu_caps() - Store a snapshot of CPU capabilities
+ * @curr_info: Pointer where to store it
+ *
+ * Returns: None
+ */
+void store_cpu_caps(struct cpuinfo_x86 *curr_info)
+{
+	/* Reload CPUID max function as it might've changed. */
+	curr_info->cpuid_level = cpuid_eax(0);
+
+	/* Copy all capability leafs and pick up the synthetic ones. */
+	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
+	       sizeof(curr_info->x86_capability));
+
+	/* Get the hardware CPUID leafs */
+	get_cpu_cap(curr_info);
+}
+
+/**
+ * microcode_check() - Check if any CPU capabilities changed after an update.
+ * @prev_info:	CPU capabilities stored before an update.
+ *
  * The microcode loader calls this upon late microcode load to recheck features,
  * only when microcode has been updated. Caller holds microcode_mutex and CPU
  * hotplug lock.
+ *
+ * Return: None
  */
-void microcode_check(void)
+void microcode_check(struct cpuinfo_x86 *prev_info)
 {
-	struct cpuinfo_x86 info;
+	struct cpuinfo_x86 curr_info;
 
 	perf_check_microcode();
 
-	/* Reload CPUID max function as it might've changed. */
-	info.cpuid_level = cpuid_eax(0);
-
-	/*
-	 * Copy all capability leafs to pick up the synthetic ones so that
-	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
-	 * get overwritten in get_cpu_cap().
-	 */
-	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
-
-	get_cpu_cap(&info);
+	store_cpu_caps(&curr_info);
 
-	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
+		    sizeof(prev_info->x86_capability)))
 		return;
 
 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
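
The last hunk above splits the old microcode_check() into two steps: store_cpu_caps() snapshots the CPU capabilities, and microcode_check() now compares a caller-supplied pre-update snapshot against a fresh one, instead of always diffing against boot_cpu_data. A minimal sketch of the resulting calling pattern, assuming a late-loading caller: reload_late_sketch() and apply_update_on_all_cpus() are hypothetical stand-ins for the loader's entry point and its apply step; only store_cpu_caps() and microcode_check() come from this diff.

	/* Sketch only; not the actual loader code from this series. */
	static int reload_late_sketch(void)
	{
		struct cpuinfo_x86 prev_info;
		int ret;

		/* Snapshot CPU capabilities as seen before the update. */
		store_cpu_caps(&prev_info);

		/* Hypothetical apply step (e.g. a stop_machine() callback). */
		ret = apply_update_on_all_cpus();
		if (ret)
			return ret;

		/*
		 * Re-derive the capabilities and compare them against the
		 * pre-update snapshot; microcode_check() warns if any
		 * feature bits changed across the reload.
		 */
		microcode_check(&prev_info);
		return 0;
	}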