Diffstat (limited to 'arch/mips/kernel/process.c')
-rw-r--r--  arch/mips/kernel/process.c  123
1 file changed, 71 insertions(+), 52 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..8fc69891e117 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,8 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
+#include <linux/cpu.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +657,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
@@ -691,19 +707,25 @@ int mips_get_process_fp_mode(struct task_struct *task)
 	return value;
 }
 
-static void prepare_for_fp_mode_switch(void *info)
+static long prepare_for_fp_mode_switch(void *unused)
 {
-	struct mm_struct *mm = info;
-
-	if (current->mm == mm)
-		lose_fpu(1);
+	/*
+	 * This is icky, but we use this to simply ensure that all CPUs have
+	 * context switched, regardless of whether they were previously running
+	 * kernel or user code. This ensures that no CPU currently has its FPU
+	 * enabled, or is about to attempt to enable it through any path other
+	 * than enable_restore_fp_context() which will wait appropriately for
+	 * fp_mode_switching to be zero.
+	 */
+	return 0;
 }
 
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
 	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
 	struct task_struct *t;
-	int max_users;
+	struct cpumask process_cpus;
+	int cpu;
 
 	/* If nothing to change, return right away, successfully.  */
 	if (value == mips_get_process_fp_mode(task))
@@ -736,35 +758,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
-	/* Proceed with the mode switch */
-	preempt_disable();
-
-	/* Save FP & vector context, then disable FPU & MSA */
-	if (task->signal == current->signal)
-		lose_fpu(1);
-
-	/* Prevent any threads from obtaining live FP context */
-	atomic_set(&task->mm->context.fp_mode_switching, 1);
-	smp_mb__after_atomic();
-
-	/*
-	 * If there are multiple online CPUs then force any which are running
-	 * threads in this process to lose their FPU context, which they can't
-	 * regain until fp_mode_switching is cleared later.
-	 */
-	if (num_online_cpus() > 1) {
-		/* No need to send an IPI for the local CPU */
-		max_users = (task->mm == current->mm) ? 1 : 0;
-
-		if (atomic_read(&current->mm->mm_users) > max_users)
-			smp_call_function(prepare_for_fp_mode_switch,
-					  (void *)current->mm, 1);
-	}
-
-	/*
-	 * There are now no threads of the process with live FP context, so it
-	 * is safe to proceed with the FP mode switch.
-	 */
+	/* Indicate the new FP mode in each thread */
 	for_each_thread(task, t) {
 		/* Update desired FP register width */
 		if (value & PR_FP_MODE_FR) {
@@ -781,9 +775,34 @@
 			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
 	}
 
-	/* Allow threads to use FP again */
-	atomic_set(&task->mm->context.fp_mode_switching, 0);
-	preempt_enable();
+	/*
+	 * We need to ensure that all threads in the process have switched mode
+	 * before returning, in order to allow userland to not worry about
+	 * races. We can do this by forcing all CPUs that any thread in the
+	 * process may be running on to schedule something else - in this case
+	 * prepare_for_fp_mode_switch().
+	 *
+	 * We begin by generating a mask of all CPUs that any thread in the
+	 * process may be running on.
+	 */
+	cpumask_clear(&process_cpus);
+	for_each_thread(task, t)
+		cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+	/*
+	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+	 *
+	 * The CPUs may have rescheduled already since we switched mode or
+	 * generated the cpumask, but that doesn't matter. If the task in this
+	 * process is scheduled out then our scheduling
+	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
+	 * scheduled in then it will already have picked up the new FP mode
+	 * whilst doing so.
+	 */
+	get_online_cpus();
+	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+	put_online_cpus();
 
 	wake_up_var(&task->mm->context.fp_mode_switching);
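
For context: the FP mode switch reworked above is driven from userspace through prctl(PR_SET_FP_MODE, ...), which lands in mips_set_process_fp_mode(). The sketch below is a hypothetical userspace example, not part of this commit, showing that call; the PR_* constants come from <linux/prctl.h>, and the fallback #defines are only for older headers. After this change the prctl returns only once every CPU that may be running a thread of the process has been scheduled through prepare_for_fp_mode_switch() via work_on_cpu(), so userland observes all threads already in the new mode.

/* Hypothetical userspace sketch - request 64-bit FP registers (FR=1). */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE	45
# define PR_GET_FP_MODE	46
# define PR_FP_MODE_FR	(1 << 0)	/* 64b FP registers */
# define PR_FP_MODE_FRE	(1 << 1)	/* 32b compatibility */
#endif

int main(void)
{
	int mode;

	/* Ask the kernel to switch every thread of this process to FR=1 */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) != 0) {
		perror("PR_SET_FP_MODE");
		return 1;
	}

	/* By the time prctl() returns, no thread still runs in the old mode */
	mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);
	printf("FP mode: FR=%d FRE=%d\n",
	       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
	return 0;
}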