diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
| -rw-r--r-- | arch/mips/kernel/traps.c | 78 | 
1 file changed, 44 insertions, 34 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bf14da9f3e33..4a1712b5abdf 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -56,6 +56,7 @@  #include <asm/pgtable.h>  #include <asm/ptrace.h>  #include <asm/sections.h> +#include <asm/siginfo.h>  #include <asm/tlbdebug.h>  #include <asm/traps.h>  #include <asm/uaccess.h> @@ -144,7 +145,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)  	if (!task)  		task = current; -	if (raw_show_trace || !__kernel_text_address(pc)) { +	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {  		show_raw_backtrace(sp);  		return;  	} @@ -398,11 +399,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)  	if (in_interrupt())  		panic("Fatal exception in interrupt"); -	if (panic_on_oops) { -		printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); -		ssleep(5); +	if (panic_on_oops)  		panic("Fatal exception"); -	}  	if (regs && kexec_should_crash(current))  		crash_kexec(regs); @@ -871,7 +869,7 @@ out:  	exception_exit(prev_state);  } -void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,  	const char *str)  {  	siginfo_t info = { 0 }; @@ -928,7 +926,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,  	default:  		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);  		die_if_kernel(b, regs); -		force_sig(SIGTRAP, current); +		if (si_code) { +			info.si_signo = SIGTRAP; +			info.si_code = si_code; +			force_sig_info(SIGTRAP, &info, current); +		} else { +			force_sig(SIGTRAP, current); +		}  	}  } @@ -1012,7 +1016,7 @@ asmlinkage void do_bp(struct pt_regs *regs)  		break;  	} -	do_trap_or_bp(regs, bcode, "Break"); +	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");  out:  	set_fs(seg); @@ -1054,7 +1058,7 @@ asmlinkage void do_tr(struct pt_regs *regs)  			tcode = (opcode >> 6) & ((1 << 10) - 1);  	} -	do_trap_or_bp(regs, tcode, 
"Trap"); +	do_trap_or_bp(regs, tcode, 0, "Trap");  out:  	set_fs(seg); @@ -1115,19 +1119,7 @@ no_r2_instr:  	if (unlikely(compute_return_epc(regs) < 0))  		goto out; -	if (get_isa16_mode(regs->cp0_epc)) { -		unsigned short mmop[2] = { 0 }; - -		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) -			status = SIGSEGV; -		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) -			status = SIGSEGV; -		opcode = mmop[0]; -		opcode = (opcode << 16) | mmop[1]; - -		if (status < 0) -			status = simulate_rdhwr_mm(regs, opcode); -	} else { +	if (!get_isa16_mode(regs->cp0_epc)) {  		if (unlikely(get_user(opcode, epc) < 0))  			status = SIGSEGV; @@ -1142,6 +1134,18 @@ no_r2_instr:  		if (status < 0)  			status = simulate_fp(regs, opcode, old_epc, old31); +	} else if (cpu_has_mmips) { +		unsigned short mmop[2] = { 0 }; + +		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) +			status = SIGSEGV; +		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) +			status = SIGSEGV; +		opcode = mmop[0]; +		opcode = (opcode << 16) | mmop[1]; + +		if (status < 0) +			status = simulate_rdhwr_mm(regs, opcode);  	}  	if (status < 0) @@ -1242,7 +1246,7 @@ static int enable_restore_fp_context(int msa)  		err = init_fpu();  		if (msa && !err) {  			enable_msa(); -			_init_msa_upper(); +			init_msa_upper();  			set_thread_flag(TIF_USEDMSA);  			set_thread_flag(TIF_MSA_CTX_LIVE);  		} @@ -1305,7 +1309,7 @@ static int enable_restore_fp_context(int msa)  	 */  	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);  	if (!prior_msa && was_fpu_owner) { -		_init_msa_upper(); +		init_msa_upper();  		goto out;  	} @@ -1322,7 +1326,7 @@ static int enable_restore_fp_context(int msa)  		 * of each vector register such that it cannot see data left  		 * behind by another task.  		 */ -		_init_msa_upper(); +		init_msa_upper();  	} else {  		/* We need to restore the vector context. 
*/  		restore_msa(current); @@ -1349,7 +1353,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)  	unsigned long fcr31;  	unsigned int cpid;  	int status, err; -	unsigned long __maybe_unused flags;  	int sig;  	prev_state = exception_enter(); @@ -1492,17 +1495,15 @@ asmlinkage void do_mdmx(struct pt_regs *regs)   */  asmlinkage void do_watch(struct pt_regs *regs)  { +	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };  	enum ctx_state prev_state; -	u32 cause;  	prev_state = exception_enter();  	/*  	 * Clear WP (bit 22) bit of cause register so we don't loop  	 * forever.  	 */ -	cause = read_c0_cause(); -	cause &= ~(1 << 22); -	write_c0_cause(cause); +	clear_c0_cause(CAUSEF_WP);  	/*  	 * If the current thread has the watch registers loaded, save @@ -1512,7 +1513,7 @@ asmlinkage void do_watch(struct pt_regs *regs)  	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {  		mips_read_watch_registers();  		local_irq_enable(); -		force_sig(SIGTRAP, current); +		force_sig_info(SIGTRAP, &info, current);  	} else {  		mips_clear_watch_registers();  		local_irq_enable(); @@ -1639,6 +1640,7 @@ static inline void parity_protection_init(void)  	case CPU_P5600:  	case CPU_QEMU_GENERIC:  	case CPU_I6400: +	case CPU_P6600:  		{  #define ERRCTL_PE	0x80000000  #define ERRCTL_L2P	0x00800000 @@ -1769,7 +1771,8 @@ asmlinkage void do_ftlb(void)  	/* For the moment, report the problem and hang. 
*/  	if ((cpu_has_mips_r2_r6) && -	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { +	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || +	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {  		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",  		       read_c0_ecc());  		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); @@ -2111,6 +2114,13 @@ void per_cpu_trap_init(bool is_boot_cpu)  	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt  	 */  	if (cpu_has_mips_r2_r6) { +		/* +		 * We shouldn't trust a secondary core has a sane EBASE register +		 * so use the one calculated by the boot CPU. +		 */ +		if (!is_boot_cpu) +			write_c0_ebase(ebase); +  		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;  		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;  		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; @@ -2126,7 +2136,7 @@ void per_cpu_trap_init(bool is_boot_cpu)  	}  	if (!cpu_data[cpu].asid_cache) -		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; +		cpu_data[cpu].asid_cache = asid_first_version(cpu);  	atomic_inc(&init_mm.mm_count);  	current->active_mm = &init_mm; @@ -2214,7 +2224,7 @@ void __init trap_init(void)  	/*  	 * Copy the generic exception handlers to their final destination. -	 * This will be overriden later as suitable for a particular +	 * This will be overridden later as suitable for a particular  	 * configuration.  	 */  	set_handler(0x180, &except_vec3_generic, 0x80);  |