diff options
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/mce.c')
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 22 | 
1 file changed, 16 insertions, 6 deletions
| diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 953b3ce92dcc..8cb3c02980cf 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -270,7 +270,7 @@ static void print_mce(struct mce *m)  {  	__print_mce(m); -	if (m->cpuvendor != X86_VENDOR_AMD) +	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)  		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");  } @@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)  bool mce_is_memory_error(struct mce *m)  { -	if (m->cpuvendor == X86_VENDOR_AMD) { +	if (m->cpuvendor == X86_VENDOR_AMD || +	    m->cpuvendor == X86_VENDOR_HYGON) {  		return amd_mce_is_memory_error(m); -  	} else if (m->cpuvendor == X86_VENDOR_INTEL) {  		/*  		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)  	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)  		return false; +	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) +		return false; +  	if (m->status & MCI_STATUS_UC)  		return false; @@ -1315,7 +1318,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)  		local_irq_disable();  		ist_end_non_atomic();  	} else { -		if (!fixup_exception(regs, X86_TRAP_MC)) +		if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))  			mce_panic("Failed kernel mode recovery", &m, NULL);  	} @@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)   */  static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)  { -	if (c->x86_vendor == X86_VENDOR_AMD) { +	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {  		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);  		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);  		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA); @@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)  		
mce_amd_feature_init(c);  		break;  		} + +	case X86_VENDOR_HYGON: +		mce_hygon_feature_init(c); +		break; +  	case X86_VENDOR_CENTAUR:  		mce_centaur_feature_init(c);  		break; @@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)  static void vendor_disable_error_reporting(void)  {  	/* -	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide. +	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs +	 * are socket-wide.  	 * Disabling them for just a single offlined CPU is bad, since it will  	 * inhibit reporting for all shared resources on the socket like the  	 * last level cache (LLC), the integrated memory controller (iMC), etc.  	 */  	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || +	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||  	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)  		return; |