Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--	arch/x86/kernel/cpu/intel.c	188
1 file changed, 67 insertions, 121 deletions
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fdf3489d92a4..08b95a35b5cb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -72,19 +72,19 @@ static bool cpu_model_supports_sld __ro_after_init;
  */
 static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
 {
-	switch (c->x86_model) {
-	case INTEL_FAM6_CORE_YONAH:
-	case INTEL_FAM6_CORE2_MEROM:
-	case INTEL_FAM6_CORE2_MEROM_L:
-	case INTEL_FAM6_CORE2_PENRYN:
-	case INTEL_FAM6_CORE2_DUNNINGTON:
-	case INTEL_FAM6_NEHALEM:
-	case INTEL_FAM6_NEHALEM_G:
-	case INTEL_FAM6_NEHALEM_EP:
-	case INTEL_FAM6_NEHALEM_EX:
-	case INTEL_FAM6_WESTMERE:
-	case INTEL_FAM6_WESTMERE_EP:
-	case INTEL_FAM6_SANDYBRIDGE:
+	switch (c->x86_vfm) {
+	case INTEL_CORE_YONAH:
+	case INTEL_CORE2_MEROM:
+	case INTEL_CORE2_MEROM_L:
+	case INTEL_CORE2_PENRYN:
+	case INTEL_CORE2_DUNNINGTON:
+	case INTEL_NEHALEM:
+	case INTEL_NEHALEM_G:
+	case INTEL_NEHALEM_EP:
+	case INTEL_NEHALEM_EX:
+	case INTEL_WESTMERE:
+	case INTEL_WESTMERE_EP:
+	case INTEL_SANDYBRIDGE:
 		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
 	}
 }
@@ -106,9 +106,9 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 	 */
 	if (c->x86 != 6)
 		return;
-	switch (c->x86_model) {
-	case INTEL_FAM6_XEON_PHI_KNL:
-	case INTEL_FAM6_XEON_PHI_KNM:
+	switch (c->x86_vfm) {
+	case INTEL_XEON_PHI_KNL:
+	case INTEL_XEON_PHI_KNM:
 		break;
 	default:
 		return;
@@ -134,32 +134,32 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
  * - Release note from 20180108 microcode release
  */
 struct sku_microcode {
-	u8 model;
+	u32 vfm;
 	u8 stepping;
 	u32 microcode;
 };
 static const struct sku_microcode spectre_bad_microcodes[] = {
-	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
-	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
-	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
-	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
-	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
-	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
-	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
-	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
-	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
-	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
-	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
-	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
-	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
-	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
-	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
-	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
+	{ INTEL_KABYLAKE,	0x0B,	0x80 },
+	{ INTEL_KABYLAKE,	0x0A,	0x80 },
+	{ INTEL_KABYLAKE,	0x09,	0x80 },
+	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
+	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
+	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
+	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
+	{ INTEL_BROADWELL,	0x04,	0x28 },
+	{ INTEL_BROADWELL_G,	0x01,	0x1b },
+	{ INTEL_BROADWELL_D,	0x02,	0x14 },
+	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
+	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
+	{ INTEL_HASWELL_L,	0x01,	0x21 },
+	{ INTEL_HASWELL_G,	0x01,	0x18 },
+	{ INTEL_HASWELL,	0x03,	0x23 },
+	{ INTEL_HASWELL_X,	0x02,	0x3b },
+	{ INTEL_HASWELL_X,	0x04,	0x10 },
+	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
 	/* Observed in the wild */
-	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
-	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
+	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
+	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
 };
 
 static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
@@ -173,11 +173,8 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 		return false;
 
-	if (c->x86 != 6)
-		return false;
-
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
-		if (c->x86_model == spectre_bad_microcodes[i].model &&
+		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
 			return (c->microcode <= spectre_bad_microcodes[i].microcode);
 	}
@@ -190,83 +187,35 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 #define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
 #define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
 
-#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128	0
-
 #define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
 
-#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED			0
-#define MKTME_DISABLED			1
-#define MKTME_UNINITIALIZED		2
-static int mktme_status = MKTME_UNINITIALIZED;
-
 static void detect_tme_early(struct cpuinfo_x86 *c)
 {
-	u64 tme_activate, tme_policy, tme_crypto_algs;
-	int keyid_bits = 0, nr_keyids = 0;
-	static u64 tme_activate_cpu0 = 0;
+	u64 tme_activate;
+	int keyid_bits;
 
 	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
 
-	if (mktme_status != MKTME_UNINITIALIZED) {
-		if (tme_activate != tme_activate_cpu0) {
-			/* Broken BIOS? */
-			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
-			pr_err_once("x86/tme: MKTME is not usable\n");
-			mktme_status = MKTME_DISABLED;
-
-			/* Proceed. We may need to exclude bits from x86_phys_bits. */
-		}
-	} else {
-		tme_activate_cpu0 = tme_activate;
-	}
-
 	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
 		pr_info_once("x86/tme: not enabled by BIOS\n");
-		mktme_status = MKTME_DISABLED;
 		clear_cpu_cap(c, X86_FEATURE_TME);
 		return;
 	}
-
-	if (mktme_status != MKTME_UNINITIALIZED)
-		goto detect_keyid_bits;
-
-	pr_info("x86/tme: enabled by BIOS\n");
-
-	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
-	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
-		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
-	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
-	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
-		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
-				tme_crypto_algs);
-		mktme_status = MKTME_DISABLED;
-	}
-detect_keyid_bits:
+	pr_info_once("x86/tme: enabled by BIOS\n");
 	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
-	nr_keyids = (1UL << keyid_bits) - 1;
-	if (nr_keyids) {
-		pr_info_once("x86/mktme: enabled by BIOS\n");
-		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
-	} else {
-		pr_info_once("x86/mktme: disabled by BIOS\n");
-	}
-
-	if (mktme_status == MKTME_UNINITIALIZED) {
-		/* MKTME is usable */
-		mktme_status = MKTME_ENABLED;
-	}
+	if (!keyid_bits)
+		return;
 
 	/*
-	 * KeyID bits effectively lower the number of physical address
-	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+	 * KeyID bits are set by BIOS and can be present regardless
+	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
+	 *
+	 * Update cpuinfo_x86::x86_phys_bits accordingly.
 	 */
 	c->x86_phys_bits -= keyid_bits;
+	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
+		     keyid_bits);
 }
 
 void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
@@ -320,7 +269,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * need the microcode to have already been loaded... so if it is
 	 * not, recommend a BIOS update and disable large pages.
 	 */
-	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
+	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
 	    c->microcode < 0x20e) {
 		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
 		clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -352,17 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
-	if (c->x86 == 6) {
-		switch (c->x86_model) {
-		case INTEL_FAM6_ATOM_SALTWELL_MID:
-		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
-		case INTEL_FAM6_ATOM_SILVERMONT_MID:
-		case INTEL_FAM6_ATOM_AIRMONT_NP:
-			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
-			break;
-		default:
-			break;
-		}
+	switch (c->x86_vfm) {
+	case INTEL_ATOM_SALTWELL_MID:
+	case INTEL_ATOM_SALTWELL_TABLET:
+	case INTEL_ATOM_SILVERMONT_MID:
+	case INTEL_ATOM_AIRMONT_NP:
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
+		break;
 	}
 
 	/*
@@ -401,7 +346,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
 	 * to be modified.
 	 */
-	if (c->x86 == 5 && c->x86_model == 9) {
+	if (c->x86_vfm == INTEL_QUARK_X1000) {
 		pr_info("Disabling PGE capability bit\n");
 		setup_clear_cpu_cap(X86_FEATURE_PGE);
 	}
@@ -633,12 +578,13 @@ static void init_intel(struct cpuinfo_x86 *c)
 			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
-	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
+	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
+	     c->x86_vfm == INTEL_NEHALEM_EX ||
+	     c->x86_vfm == INTEL_WESTMERE_EX))
 		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
 
-	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
-		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
+	if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
 		set_cpu_bug(c, X86_BUG_MONITOR);
 
 #ifdef CONFIG_X86_64
@@ -1254,9 +1200,9 @@ void handle_bus_lock(struct pt_regs *regs)
  * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
  */
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	0),
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,	0),
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	0),
+	X86_MATCH_VFM(INTEL_ICELAKE_X,	0),
+	X86_MATCH_VFM(INTEL_ICELAKE_L,	0),
+	X86_MATCH_VFM(INTEL_ICELAKE_D,	0),
 	{}
 };
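
The conversions above all follow one pattern: separate family tests (c->x86 == 6) plus model tests (c->x86_model) collapse into a single compare against c->x86_vfm, which packs vendor, family and model into one value. The standalone sketch below illustrates that packing idea only; the DEMO_* names and the exact bit layout are assumptions for illustration, not the kernel's actual macros from <asm/intel-family.h> or <asm/cpu_device_id.h>.

	/*
	 * Illustrative sketch of a packed vendor/family/model value.
	 * Assumed layout: model in bits 7:0, family in bits 15:8,
	 * vendor in bits 23:16. Compiles and runs on its own.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_VENDOR_INTEL	0

	/* Pack vendor, family and model so one compare replaces two tests. */
	#define DEMO_VFM_MAKE(vendor, family, model) \
		(((uint32_t)(vendor) << 16) | ((uint32_t)(family) << 8) | (uint32_t)(model))

	/* Family 6, model 0x9E used purely as an example value. */
	#define DEMO_INTEL_KABYLAKE	DEMO_VFM_MAKE(DEMO_VENDOR_INTEL, 6, 0x9E)

	struct demo_cpuinfo {
		uint32_t x86_vfm;	/* replaces separate family + model fields */
		uint8_t  x86_stepping;
	};

	static int demo_is_kabylake(const struct demo_cpuinfo *c)
	{
		/* One switch on the packed value; no extra family guard needed. */
		switch (c->x86_vfm) {
		case DEMO_INTEL_KABYLAKE:
			return 1;
		default:
			return 0;
		}
	}

	int main(void)
	{
		struct demo_cpuinfo c = { .x86_vfm = DEMO_INTEL_KABYLAKE, .x86_stepping = 0x0B };

		printf("Kaby Lake? %s\n", demo_is_kabylake(&c) ? "yes" : "no");
		return 0;
	}

Because vendor and family are part of the packed value, a model number from another vendor's family 6 can no longer match by accident, which is why the diff can simply drop the "c->x86 != 6" guard in bad_spectre_microcode().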