Diffstat (limited to 'arch/x86/kernel/cpu/amd.c')
 arch/x86/kernel/cpu/amd.c | 348 ++++++++++++++++++++++--------------------------
 1 file changed, 162 insertions(+), 186 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ce8b8ff0e0ef..60e5497681f5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -8,6 +8,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/smp.h>
 #include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
@@ -50,7 +51,6 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 	return wrmsr_safe_regs(gprs);
 }
 
-#ifdef CONFIG_X86_32
 /*
  *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  *	misexecution of code under Linux. Owners of such processors should
@@ -70,6 +70,7 @@ __asm__(".globl vide\n\t.align 4\nvide: ret");
 
 static void init_amd_k5(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
 /*
  * General Systems BIOSen alias the cpu frequency registers
  * of the Elan at 0x000df000. Unfortuantly, one of the Linux
@@ -83,11 +84,12 @@ static void init_amd_k5(struct cpuinfo_x86 *c)
 		if (inl(CBAR) & CBAR_ENB)
 			outl(0 | CBAR_KEY, CBAR);
 	}
+#endif
 }
 
-
 static void init_amd_k6(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
 	u32 l, h;
 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 
@@ -176,10 +178,44 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 		/* placeholder for any needed mods */
 		return;
 	}
+#endif
 }
 
-static void amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
+	u32 l, h;
+
+	/*
+	 * Bit 15 of Athlon specific MSR 15, needs to be 0
+	 * to enable SSE on Palomino/Morgan/Barton CPU's.
+	 * If the BIOS didn't enable it already, enable it here.
+	 */
+	if (c->x86_model >= 6 && c->x86_model <= 10) {
+		if (!cpu_has(c, X86_FEATURE_XMM)) {
+			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+			msr_clear_bit(MSR_K7_HWCR, 15);
+			set_cpu_cap(c, X86_FEATURE_XMM);
+		}
+	}
+
+	/*
+	 * It's been determined by AMD that Athlons since model 8 stepping 1
+	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+	 * As per AMD technical note 27212 0.2
+	 */
+	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+		rdmsr(MSR_K7_CLK_CTL, l, h);
+		if ((l & 0xfff00000) != 0x20000000) {
+			printk(KERN_INFO
+			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+					l, ((l & 0x000fffff)|0x20000000));
+			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+		}
+	}
+
+	set_cpu_cap(c, X86_FEATURE_K7);
+
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -207,7 +243,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
 	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
 	     (c->x86_model > 7))
-		if (cpu_has_mp)
+		if (cpu_has(c, X86_FEATURE_MP))
 			return;
 
 	/* If we get here, not a certified SMP capable AMD system. */
@@ -219,45 +255,8 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 	WARN_ONCE(1, "WARNING: This combination of AMD"
 		" processors is not suitable for SMP.\n");
 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
-}
-
-static void init_amd_k7(struct cpuinfo_x86 *c)
-{
-	u32 l, h;
-
-	/*
-	 * Bit 15 of Athlon specific MSR 15, needs to be 0
-	 * to enable SSE on Palomino/Morgan/Barton CPU's.
-	 * If the BIOS didn't enable it already, enable it here.
-	 */
-	if (c->x86_model >= 6 && c->x86_model <= 10) {
-		if (!cpu_has(c, X86_FEATURE_XMM)) {
-			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-			msr_clear_bit(MSR_K7_HWCR, 15);
-			set_cpu_cap(c, X86_FEATURE_XMM);
-		}
-	}
-
-	/*
-	 * It's been determined by AMD that Athlons since model 8 stepping 1
-	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-	 * As per AMD technical note 27212 0.2
-	 */
-	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
-		rdmsr(MSR_K7_CLK_CTL, l, h);
-		if ((l & 0xfff00000) != 0x20000000) {
-			printk(KERN_INFO
-			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
-					l, ((l & 0x000fffff)|0x20000000));
-			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-		}
-	}
-
-	set_cpu_cap(c, X86_FEATURE_K7);
-
-	amd_k7_smp_check(c);
-}
 #endif
+}
 
 #ifdef CONFIG_NUMA
 /*
@@ -446,6 +445,26 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
 
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
+
+#ifdef CONFIG_X86_64
+	if (c->x86 >= 0xf) {
+		unsigned long long tseg;
+
+		/*
+		 * Split up direct mapping around the TSEG SMM area.
+		 * Don't do it for gbpages because there seems very little
+		 * benefit in doing so.
+		 */
+		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+			unsigned long pfn = tseg >> PAGE_SHIFT;
+
+			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+			if (pfn_range_is_mapped(pfn, pfn + 1))
+				set_memory_4k((unsigned long)__va(tseg), 1);
+		}
+	}
+#endif
+
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 
 		if (c->x86 > 0x10 ||
@@ -515,101 +534,74 @@ static const int amd_erratum_383[];
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
-static void init_amd(struct cpuinfo_x86 *c)
+static void init_amd_k8(struct cpuinfo_x86 *c)
 {
-	u32 dummy;
-	unsigned long long value;
+	u32 level;
+	u64 value;
 
-#ifdef CONFIG_SMP
-	/*
-	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
-	 * bit 6 of msr C001_0015
-	 *
-	 * Errata 63 for SH-B3 steppings
-	 * Errata 122 for all steppings (F+ have it disabled by default)
-	 */
-	if (c->x86 == 0xf)
-		msr_set_bit(MSR_K7_HWCR, 6);
-#endif
-
-	early_init_amd(c);
+	/* On C+ stepping K8 rep microcode works well for copy/memset */
+	level = cpuid_eax(1);
+	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/*
-	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 * Some BIOSes incorrectly force this feature, but only K8 revision D
+	 * (model = 0x14) and later actually support it.
+	 * (AMD Erratum #110, docId: 25759).
 	 */
-	clear_cpu_cap(c, 0*32+31);
-
-#ifdef CONFIG_X86_64
-	/* On C+ stepping K8 rep microcode works well for copy/memset */
-	if (c->x86 == 0xf) {
-		u32 level;
-
-		level = cpuid_eax(1);
-		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
-			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
-		/*
-		 * Some BIOSes incorrectly force this feature, but only K8
-		 * revision D (model = 0x14) and later actually support it.
-		 * (AMD Erratum #110, docId: 25759).
-		 */
-		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
-			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
-			if (!rdmsrl_amd_safe(0xc001100d, &value)) {
-				value &= ~(1ULL << 32);
-				wrmsrl_amd_safe(0xc001100d, value);
-			}
+	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
+		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
+		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+			value &= ~BIT_64(32);
+			wrmsrl_amd_safe(0xc001100d, value);
 		}
-
 	}
-	if (c->x86 >= 0x10)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
-	/* get apicid instead of initial apic id from cpuid */
-	c->apicid = hard_smp_processor_id();
-#else
+	if (!c->x86_model_id[0])
+		strcpy(c->x86_model_id, "Hammer");
+}
+
+static void init_amd_gh(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	/* do this for boot cpu */
+	if (c == &boot_cpu_data)
+		check_enable_amd_mmconf_dmi();
+
+	fam10h_check_enable_mmcfg();
+#endif
 
 	/*
-	 *	FIXME: We should handle the K5 here. Set up the write
-	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
-	 *	no bus pipeline)
+	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
+	 * is always needed when GART is enabled, even in a kernel which has no
+	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
+	 * If it doesn't, we do it here as suggested by the BKDG.
+	 *
+	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 	 */
+	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 
-	switch (c->x86) {
-	case 4:
-		init_amd_k5(c);
-		break;
-	case 5:
-		init_amd_k6(c);
-		break;
-	case 6: /* An Athlon/Duron */
-		init_amd_k7(c);
-		break;
-	}
+	/*
+	 * On family 10h BIOS may not have properly enabled WC+ support, causing
+	 * it to be converted to CD memtype. This may result in performance
+	 * degradation for certain nested-paging guests. Prevent this conversion
+	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
+	 *
+	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
+	 * guests on older kvm hosts.
+	 */
+	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 
-	/* K6s reports MCEs but don't actually have all the MSRs */
-	if (c->x86 < 6)
-		clear_cpu_cap(c, X86_FEATURE_MCE);
-#endif
+	if (cpu_has_amd_erratum(c, amd_erratum_383))
+		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+}
 
-	/* Enable workaround for FXSAVE leak */
-	if (c->x86 >= 6)
-		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
-
-	if (!c->x86_model_id[0]) {
-		switch (c->x86) {
-		case 0xf:
-			/* Should distinguish Models here, but this is only
-			   a fallback anyways. */
-			strcpy(c->x86_model_id, "Hammer");
-			break;
-		}
-	}
+static void init_amd_bd(struct cpuinfo_x86 *c)
+{
+	u64 value;
 
 	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86 == 0x15) &&
-	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
 		if (msr_set_bit(0xc0011005, 54) > 0) {
@@ -625,14 +617,60 @@ static void init_amd(struct cpuinfo_x86 *c)
 	 * The way access filter has a performance penalty on some workloads.
 	 * Disable it on the affected CPUs.
 	 */
-	if ((c->x86 == 0x15) &&
-	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-
+	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
 			value |= 0x1E;
 			wrmsrl_safe(0xc0011021, value);
 		}
 	}
+}
+
+static void init_amd(struct cpuinfo_x86 *c)
+{
+	u32 dummy;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+	 * bit 6 of msr C001_0015
+	 *
+	 * Errata 63 for SH-B3 steppings
+	 * Errata 122 for all steppings (F+ have it disabled by default)
+	 */
+	if (c->x86 == 0xf)
+		msr_set_bit(MSR_K7_HWCR, 6);
+#endif
+
+	early_init_amd(c);
+
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
+
+	if (c->x86 >= 0x10)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+	/* get apicid instead of initial apic id from cpuid */
+	c->apicid = hard_smp_processor_id();
+
+	/* K6s reports MCEs but don't actually have all the MSRs */
+	if (c->x86 < 6)
+		clear_cpu_cap(c, X86_FEATURE_MCE);
+
+	switch (c->x86) {
+	case 4:    init_amd_k5(c); break;
+	case 5:    init_amd_k6(c); break;
+	case 6:	   init_amd_k7(c); break;
+	case 0xf:  init_amd_k8(c); break;
+	case 0x10: init_amd_gh(c); break;
+	case 0x15: init_amd_bd(c); break;
+	}
+
+	/* Enable workaround for FXSAVE leak */
+	if (c->x86 >= 6)
+		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
 	cpu_detect_cache_sizes(c);
 
@@ -656,33 +694,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 	}
 
-#ifdef CONFIG_X86_64
-	if (c->x86 == 0x10) {
-		/* do this for boot cpu */
-		if (c == &boot_cpu_data)
-			check_enable_amd_mmconf_dmi();
-
-		fam10h_check_enable_mmcfg();
-	}
-
-	if (c == &boot_cpu_data && c->x86 >= 0xf) {
-		unsigned long long tseg;
-
-		/*
-		 * Split up direct mapping around the TSEG SMM area.
-		 * Don't do it for gbpages because there seems very little
-		 * benefit in doing so.
-		 */
-		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
-			unsigned long pfn = tseg >> PAGE_SHIFT;
-
-			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-			if (pfn_range_is_mapped(pfn, pfn + 1))
-				set_memory_4k((unsigned long)__va(tseg), 1);
-		}
-	}
-#endif
-
 	/*
 	 * Family 0x12 and above processors have APIC timer
 	 * running in deep C states.
@@ -690,34 +701,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 > 0x11)
 		set_cpu_cap(c, X86_FEATURE_ARAT);
 
-	if (c->x86 == 0x10) {
-		/*
-		 * Disable GART TLB Walk Errors on Fam10h. We do this here
-		 * because this is always needed when GART is enabled, even in a
-		 * kernel which has no MCE support built in.
-		 * BIOS should disable GartTlbWlk Errors already. If
-		 * it doesn't, do it here as suggested by the BKDG.
-		 *
-		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
-		 */
-		msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
-
-		/*
-		 * On family 10h BIOS may not have properly enabled WC+ support,
-		 * causing it to be converted to CD memtype. This may result in
-		 * performance degradation for certain nested-paging guests.
-		 * Prevent this conversion by clearing bit 24 in
-		 * MSR_AMD64_BU_CFG2.
-		 *
-		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
-		 * guests on older kvm hosts.
-		 */
-		msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
-
-		if (cpu_has_amd_erratum(c, amd_erratum_383))
-			set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
-	}
-
 	if (cpu_has_amd_erratum(c, amd_erratum_400))
 		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
@@ -741,11 +724,6 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 }
 #endif
 
-static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
-{
-	tlb_flushall_shift = 6;
-}
-
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 {
 	u32 ebx, eax, ecx, edx;
@@ -793,8 +771,6 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 		tlb_lli_2m[ENTRIES] = eax & mask;
 
 	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
-
-	cpu_set_tlb_flushall_shift(c);
 }
 
 static const struct cpu_dev amd_cpu_dev = {
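The structural idea of the patch is easiest to see in the final init_amd(): the family checks that used to be threaded through one long function (c->x86 == 0xf, 0x10, 0x15, ...) become one helper per family, selected by a single switch. The standalone C sketch below models that dispatch shape; it is an illustration, not kernel code, and "struct cpuinfo" plus the printed strings are hypothetical stand-ins for cpuinfo_x86 and the real init helpers.

	/*
	 * Sketch of the per-family dispatch the patch introduces.
	 * Build with: cc -o dispatch dispatch.c
	 */
	#include <stdio.h>

	struct cpuinfo {
		unsigned int x86;	/* CPU family: 0xf, 0x10, 0x15, ... */
	};

	/* Each helper holds only that family's quirks (MSR fixups etc.). */
	static void init_amd_k8(struct cpuinfo *c) { puts("family 0xf (K8) quirks"); }
	static void init_amd_gh(struct cpuinfo *c) { puts("family 0x10 (GH) quirks"); }
	static void init_amd_bd(struct cpuinfo *c) { puts("family 0x15 (BD) quirks"); }

	static void init_amd(struct cpuinfo *c)
	{
		/* ... family-independent setup runs first ... */

		/* One helper per family; unknown families simply fall through. */
		switch (c->x86) {
		case 0xf:  init_amd_k8(c); break;
		case 0x10: init_amd_gh(c); break;
		case 0x15: init_amd_bd(c); break;
		}

		/* ... family-independent setup continues here ... */
	}

	int main(void)
	{
		struct cpuinfo c = { .x86 = 0x10 };

		init_amd(&c);	/* prints "family 0x10 (GH) quirks" */
		return 0;
	}

The payoff, visible in the diffstat above, is that each family's quirks live in exactly one place: conditions like "c->x86 == 0x15" disappear from the helpers because the switch has already established the family, and supporting a new family means adding one case rather than another test inside a 200-line function.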