Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  |  475
1 file changed, 406 insertions, 69 deletions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 74c62cc47a5f..aa34f908c39f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -38,6 +38,8 @@
 
 static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
@@ -48,16 +50,40 @@ static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 
-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void write_spec_ctrl_current(u64 val, bool force)
+{
+	if (this_cpu_read(x86_spec_ctrl_current) == val)
+		return;
+
+	this_cpu_write(x86_spec_ctrl_current, val);
+
+	/*
+	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
+	 * forced the update can be delayed until that time.
+	 */
+	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+		wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+	return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -114,13 +140,21 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
-	/* Allow STIBP in MSR_SPEC_CTRL if supported */
-	if (boot_cpu_has(X86_FEATURE_STIBP))
-		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
+	/*
+	 * retbleed_select_mitigation() relies on the state set by
+	 * spectre_v2_select_mitigation(); specifically it wants to know about
+	 * spectre_v2=ibrs.
+	 */
+	retbleed_select_mitigation();
+	/*
+	 * spectre_v2_user_select_mitigation() relies on the state set by
+	 * retbleed_select_mitigation(); specifically the STIBP selection is
+	 * forced for UNRET.
+	 */
+	spectre_v2_user_select_mitigation();
 	ssb_select_mitigation();
 	l1tf_select_mitigation();
 	md_clear_select_mitigation();
@@ -161,31 +195,17 @@ void __init check_bugs(void)
 #endif
 }
 
+/*
+ * NOTE: This function is *only* called for SVM.  VMX spec_ctrl handling is
+ * done in vmenter.S.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
 	struct thread_info *ti = current_thread_info();
 
-	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-		/*
-		 * Restrict guest_spec_ctrl to supported values. Clear the
-		 * modifiable bits in the host base value and or the
-		 * modifiable bits from the guest value.
-		 */
-		guestval = hostval & ~x86_spec_ctrl_mask;
-		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
-		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-		    static_cpu_has(X86_FEATURE_AMD_SSBD))
-			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
-		/* Conditional STIBP enabled? */
-		if (static_branch_unlikely(&switch_to_cond_stibp))
-			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -752,12 +772,180 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "RETBleed: " fmt
+
+enum retbleed_mitigation {
+	RETBLEED_MITIGATION_NONE,
+	RETBLEED_MITIGATION_UNRET,
+	RETBLEED_MITIGATION_IBPB,
+	RETBLEED_MITIGATION_IBRS,
+	RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+	RETBLEED_CMD_OFF,
+	RETBLEED_CMD_AUTO,
+	RETBLEED_CMD_UNRET,
+	RETBLEED_CMD_IBPB,
+};
+
+static const char * const retbleed_strings[] = {
+	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
+	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
+	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
+	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
+	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+	RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+	RETBLEED_CMD_AUTO;
+
+static int __ro_after_init retbleed_nosmt = false;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	while (str) {
+		char *next = strchr(str, ',');
+		if (next) {
+			*next = 0;
+			next++;
+		}
+
+		if (!strcmp(str, "off")) {
+			retbleed_cmd = RETBLEED_CMD_OFF;
+		} else if (!strcmp(str, "auto")) {
+			retbleed_cmd = RETBLEED_CMD_AUTO;
+		} else if (!strcmp(str, "unret")) {
+			retbleed_cmd = RETBLEED_CMD_UNRET;
+		} else if (!strcmp(str, "ibpb")) {
+			retbleed_cmd = RETBLEED_CMD_IBPB;
+		} else if (!strcmp(str, "nosmt")) {
+			retbleed_nosmt = true;
+		} else {
+			pr_err("Ignoring unknown retbleed option (%s).", str);
+		}
+
+		str = next;
+	}
+
+	return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+	bool mitigate_smt = false;
+
+	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+		return;
+
+	switch (retbleed_cmd) {
+	case RETBLEED_CMD_OFF:
+		return;
+
+	case RETBLEED_CMD_UNRET:
+		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+	case RETBLEED_CMD_IBPB:
+		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
+			pr_err("WARNING: CPU does not support IBPB.\n");
+			goto do_cmd_auto;
+		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+do_cmd_auto:
+	case RETBLEED_CMD_AUTO:
+	default:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
+				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		}
+
+		/*
+		 * The Intel mitigation (IBRS or eIBRS) was already selected in
+		 * spectre_v2_select_mitigation().  'retbleed_mitigation' will
+		 * be set accordingly below.
+		 */
+
+		break;
+	}
+
+	switch (retbleed_mitigation) {
+	case RETBLEED_MITIGATION_UNRET:
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+			pr_err(RETBLEED_UNTRAIN_MSG);
+
+		mitigate_smt = true;
+		break;
+
+	case RETBLEED_MITIGATION_IBPB:
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		mitigate_smt = true;
+		break;
+
+	default:
+		break;
+	}
+
+	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+		cpu_smt_disable(false);
+
+	/*
+	 * Let IBRS trump all on Intel without affecting the effects of the
+	 * retbleed= cmdline option.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_IBRS:
+			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+			break;
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+			break;
+		default:
+			pr_err(RETBLEED_INTEL_MSG);
+		}
+	}
+
+	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@@ -828,6 +1016,7 @@ enum spectre_v2_mitigation_cmd {
 	SPECTRE_V2_CMD_EIBRS,
 	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
 	SPECTRE_V2_CMD_EIBRS_LFENCE,
+	SPECTRE_V2_CMD_IBRS,
 };
 
 enum spectre_v2_user_cmd {
@@ -868,13 +1057,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
 static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
 {
 	char arg[20];
 	int ret, i;
 
-	switch (v2_cmd) {
+	switch (spectre_v2_cmd) {
 	case SPECTRE_V2_CMD_NONE:
 		return SPECTRE_V2_USER_CMD_NONE;
 	case SPECTRE_V2_CMD_FORCE:
@@ -900,15 +1091,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
 {
-	return (mode == SPECTRE_V2_EIBRS ||
-		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
-		mode == SPECTRE_V2_EIBRS_LFENCE);
+	return mode == SPECTRE_V2_IBRS ||
+	       mode == SPECTRE_V2_EIBRS ||
+	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	       mode == SPECTRE_V2_EIBRS_LFENCE;
}
 
 static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
 {
 	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
 	bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -921,7 +1113,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 		smt_possible = false;
 
-	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	cmd = spectre_v2_parse_user_cmdline();
 	switch (cmd) {
 	case SPECTRE_V2_USER_CMD_NONE:
 		goto set_mode;
@@ -969,12 +1161,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	}
 
 	/*
-	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
-	 * required.
+	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+	 * STIBP is not required.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return;
 
 	/*
@@ -986,6 +1178,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+		if (mode != SPECTRE_V2_USER_STRICT &&
+		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+	}
+
 	spectre_v2_user_stibp = mode;
 
 set_mode:
@@ -999,6 +1198,7 @@ static const char * const spectre_v2_strings[] = {
 	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
 	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
 	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
+	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
 };
 
 static const struct {
@@ -1016,6 +1216,7 @@ static const struct {
 	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
 	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
+	{ "ibrs",		SPECTRE_V2_CMD_IBRS,              false },
 };
 
 static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -1078,6 +1279,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
+	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
 	spec_v2_print_cond(mitigation_options[i].option,
 			   mitigation_options[i].secure);
 	return cmd;
@@ -1093,6 +1318,22 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 	return SPECTRE_V2_RETPOLINE;
 }
 
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+		return;
+
+	ia32_cap = x86_read_arch_cap_msr();
+
+	if (ia32_cap & ARCH_CAP_RRSBA) {
+		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+	}
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1117,6 +1358,15 @@ static void __init spectre_v2_select_mitigation(void)
 			break;
 		}
 
+		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+		    retbleed_cmd != RETBLEED_CMD_OFF &&
+		    boot_cpu_has(X86_FEATURE_IBRS) &&
+		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+			mode = SPECTRE_V2_IBRS;
+			break;
+		}
+
 		mode = spectre_v2_select_retpoline();
 		break;
 
@@ -1133,6 +1383,10 @@ static void __init spectre_v2_select_mitigation(void)
 		mode = spectre_v2_select_retpoline();
 		break;
 
+	case SPECTRE_V2_CMD_IBRS:
+		mode = SPECTRE_V2_IBRS;
+		break;
+
 	case SPECTRE_V2_CMD_EIBRS:
 		mode = SPECTRE_V2_EIBRS;
 		break;
@@ -1149,10 +1403,9 @@ static void __init spectre_v2_select_mitigation(void)
 	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
 
-	if (spectre_v2_in_eibrs_mode(mode)) {
-		/* Force it so VMEXIT will restore correctly */
+	if (spectre_v2_in_ibrs_mode(mode)) {
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 	}
 
 	switch (mode) {
@@ -1160,6 +1413,10 @@ static void __init spectre_v2_select_mitigation(void)
 	case SPECTRE_V2_EIBRS:
 		break;
 
+	case SPECTRE_V2_IBRS:
+		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		break;
+
 	case SPECTRE_V2_LFENCE:
 	case SPECTRE_V2_EIBRS_LFENCE:
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1171,43 +1428,107 @@ static void __init spectre_v2_select_mitigation(void)
 		break;
 	}
 
+	/*
+	 * Disable alternate RSB predictions in kernel when indirect CALLs and
+	 * JMPs gets protection against BHI and Intramode-BTI, but RET
+	 * prediction from a non-RSB predictor is still a risk.
+	 */
+	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	    mode == SPECTRE_V2_RETPOLINE)
+		spec_ctrl_disable_kernel_rrsba();
+
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If spectre v2 protection has been enabled, unconditionally fill
-	 * RSB during a context switch; this protects against two independent
-	 * issues:
+	 * If Spectre v2 protection has been enabled, fill the RSB during a
+	 * context switch.  In general there are two types of RSB attacks
+	 * across context switches, for which the CALLs/RETs may be unbalanced.
 	 *
-	 *	- RSB underflow (and switch to BTB) on Skylake+
-	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+	 * 1) RSB underflow
+	 *
+	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+	 *    speculated return targets may come from the branch predictor,
+	 *    which could have a user-poisoned BTB or BHB entry.
+	 *
+	 *    AMD has it even worse: *all* returns are speculated from the BTB,
+	 *    regardless of the state of the RSB.
+	 *
+	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+	 *    scenario is mitigated by the IBRS branch prediction isolation
+	 *    properties, so the RSB buffer filling wouldn't be necessary to
+	 *    protect against this type of attack.
+	 *
+	 *    The "user -> user" attack scenario is mitigated by RSB filling.
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 *    If the 'next' in-kernel return stack is shorter than 'prev',
+	 *    'next' could be tricked into speculating with a user-poisoned RSB
+	 *    entry.
+	 *
+	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
+	 *    eIBRS.
+	 *
+	 *    The "user -> user" scenario, also known as SpectreBHB, requires
+	 *    RSB clearing.
+	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
+	 * switches.
+	 *
+	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
 	/*
-	 * Retpoline means the kernel is safe because it has no indirect
-	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
-	 * speculation around firmware calls only when Enhanced IBRS isn't
-	 * supported.
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after vmexit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
+	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2.  Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
+	 * doesn't need RSB clearing after vmexit.
+	 */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
+	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
+	/*
+	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+	 * and Enhanced IBRS protect firmware too, so enable IBRS around
+	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+	 * enabled.
 	 *
 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 	 * the user might select retpoline on the kernel command line and if
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
 
 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
-	spectre_v2_user_select_mitigation(cmd);
+	spectre_v2_cmd = cmd;
 }
 
 static void update_stibp_msr(void * __unused)
 {
-	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+	write_spec_ctrl_current(val, true);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1424,16 +1745,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 	}
 
 	/*
-	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-	 * bit in the mask to allow guests to use the mitigation even in the
-	 * case where the host does not enable it.
-	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-	}
-
-	/*
 	 * We have three CPU feature flags that are in play here:
 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
 	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -1450,7 +1761,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 			x86_amd_ssb_disable();
 		} else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+			write_spec_ctrl_current(x86_spec_ctrl_base, true);
 		}
 	}
 
@@ -1701,7 +2012,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
@@ -1938,7 +2249,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
@@ -1994,6 +2305,24 @@ static ssize_t srbds_show_state(char *buf)
 	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
 }
 
+static ssize_t retbleed_show_state(char *buf)
+{
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+	    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		    return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");
+
+	    return sprintf(buf, "%s; SMT %s\n",
+			   retbleed_strings[retbleed_mitigation],
+			   !sched_smt_active() ? "disabled" :
+			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+			   "enabled with STIBP protection" : "vulnerable");
+	}
+
+	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -2039,6 +2368,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_MMIO_STALE_DATA:
 		return mmio_stale_data_show_state(buf);
 
+	case X86_BUG_RETBLEED:
+		return retbleed_show_state(buf);
+
 	default:
 		break;
 	}
@@ -2095,4 +2427,9 @@ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *at
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
 }
+
+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+}
 #endif