Diffstat (limited to 'arch/x86/kernel/smp.c')
-rw-r--r--	arch/x86/kernel/smp.c	88
1 file changed, 28 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 96421f97e75c..b8d4e9c3c070 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -115,46 +115,6 @@
 static atomic_t stopping_cpu = ATOMIC_INIT(-1);
 static bool smp_no_nmi_ipi = false;
 
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	if (unlikely(cpu_is_offline(cpu))) {
-		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
-		return;
-	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
-}
-
-void native_send_call_func_single_ipi(int cpu)
-{
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
-}
-
-void native_send_call_func_ipi(const struct cpumask *mask)
-{
-	cpumask_var_t allbutself;
-
-	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-		return;
-	}
-
-	cpumask_copy(allbutself, cpu_online_mask);
-	__cpumask_clear_cpu(smp_processor_id(), allbutself);
-
-	if (cpumask_equal(mask, allbutself) &&
-	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
-		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	free_cpumask_var(allbutself);
-}
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
@@ -179,6 +139,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
+static int register_stop_handler(void)
+{
+	return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+				    NMI_FLAG_FIRST, "smp_stop");
+}
+
 static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
@@ -209,42 +175,44 @@ static void native_stop_other_cpus(int wait)
 
 		/* sync above data before sending IRQ */
 		wmb();
 
-		apic->send_IPI_allbutself(REBOOT_VECTOR);
+		apic_send_IPI_allbutself(REBOOT_VECTOR);
 
 		/*
-		 * Don't wait longer than a second if the caller
-		 * didn't ask us to wait.
+		 * Don't wait longer than a second for IPI completion. The
+		 * wait request is not checked here because that would
+		 * prevent an NMI shutdown attempt in case that not all
+		 * CPUs reach shutdown state.
 		 */
 		timeout = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && (wait || timeout--))
+		while (num_online_cpus() > 1 && timeout--)
 			udelay(1);
 	}
-
-	/* if the REBOOT_VECTOR didn't work, try with the NMI */
-	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
-		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-					 NMI_FLAG_FIRST, "smp_stop"))
-			/* Note: we ignore failures here */
-			/* Hope the REBOOT_IRQ is good enough */
-			goto finish;
-		/* sync above data before sending IRQ */
-		wmb();
-
-		pr_emerg("Shutting down cpus with NMI\n");
+	/* if the REBOOT_VECTOR didn't work, try with the NMI */
+	if (num_online_cpus() > 1) {
+		/*
+		 * If NMI IPI is enabled, try to register the stop handler
+		 * and send the IPI. In any case try to wait for the other
+		 * CPUs to stop.
+		 */
+		if (!smp_no_nmi_ipi && !register_stop_handler()) {
+			/* Sync above data before sending IRQ */
+			wmb();
 
-		apic->send_IPI_allbutself(NMI_VECTOR);
+			pr_emerg("Shutting down cpus with NMI\n");
+			apic_send_IPI_allbutself(NMI_VECTOR);
+		}
 		/*
-		 * Don't wait longer than a 10 ms if the caller
-		 * didn't ask us to wait.
+		 * Don't wait longer than 10 ms if the caller didn't
+		 * request it. If wait is true, the machine hangs here if
+		 * one or more CPUs do not reach shutdown state.
 		 */
 		timeout = USEC_PER_MSEC * 10;
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
-finish:
 	local_irq_save(flags);
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
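
The first hunk removes the generic IPI send helpers (they move out of this file), and the removed native_send_call_func_ipi() carries one non-obvious optimization worth noting: when the requested mask is exactly "every online CPU except the sender", a single hardware allbutself broadcast replaces the per-CPU targeted send. Below is a minimal userspace sketch of that decision, with plain 64-bit masks standing in for struct cpumask; send_broadcast() and send_targeted() are hypothetical stand-ins for apic->send_IPI_allbutself() and apic->send_IPI_mask(), and the kernel's additional cpu_online_mask == cpu_callout_mask check is elided.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the APIC callbacks; they only log what
 * the kernel would send. */
static void send_broadcast(void)
{
	puts("broadcast IPI to all but self");
}

static void send_targeted(uint64_t mask)
{
	printf("targeted IPI to mask %#llx\n", (unsigned long long)mask);
}

/* The decision made by the removed native_send_call_func_ipi(): the
 * hardware broadcast is only equivalent to the targeted send when the
 * target mask is exactly "every online CPU except the sender". */
static void send_call_func_ipi(uint64_t mask, uint64_t online, int self)
{
	uint64_t allbutself = online & ~(1ULL << self);

	if (mask == allbutself)
		send_broadcast();
	else
		send_targeted(mask);
}

int main(void)
{
	uint64_t online = 0x0f;			/* CPUs 0-3 online */

	send_call_func_ipi(0x0e, online, 0);	/* CPUs 1-3 from CPU 0: broadcast */
	send_call_func_ipi(0x04, online, 0);	/* CPU 2 only: targeted */
	return 0;
}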
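
The native_stop_other_cpus() rework changes the first wait loop from "(wait || timeout--)" to a plain "timeout--": honoring the caller's wait request there could spin forever on the REBOOT vector and the NMI fallback would never be reached. Only the second, post-NMI loop honors wait, which is why the new comment warns that the machine hangs there if a CPU never reaches shutdown state. The following is a compilable userspace model of that two-stage flow, not kernel code: cpus_still_running(), send_reboot_ipi(), send_nmi_ipi() and the fake udelay() are hypothetical stubs, and the register_stop_handler() success check is folded into the no_nmi_ipi condition.

#include <stdbool.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000UL
#define USEC_PER_MSEC	1000UL

/* Fake "other CPUs": they stop after a fixed number of polls, chosen
 * so that stage 1 times out and the NMI stage is exercised. */
static unsigned long polls_until_stopped = USEC_PER_SEC + 5000;

static bool cpus_still_running(void)	/* models num_online_cpus() > 1 */
{
	return polls_until_stopped > 0;
}

static void udelay(unsigned long usec)	/* models the real 1us delay */
{
	(void)usec;
	if (polls_until_stopped)
		polls_until_stopped--;
}

static void send_reboot_ipi(void)	/* apic_send_IPI_allbutself(REBOOT_VECTOR) */
{
	puts("REBOOT_VECTOR to all but self");
}

static void send_nmi_ipi(void)		/* apic_send_IPI_allbutself(NMI_VECTOR) */
{
	puts("NMI_VECTOR to all but self");
}

static void stop_other_cpus(int wait, bool no_nmi_ipi)
{
	unsigned long timeout;

	send_reboot_ipi();

	/* Stage 1: bounded wait regardless of 'wait', so a stuck CPU
	 * cannot prevent the NMI fallback from running. */
	timeout = USEC_PER_SEC;
	while (cpus_still_running() && timeout--)
		udelay(1);

	/* Stage 2: NMI fallback; only this wait honors the caller's
	 * request and may therefore spin forever when wait != 0. */
	if (cpus_still_running()) {
		if (!no_nmi_ipi)
			send_nmi_ipi();

		timeout = USEC_PER_MSEC * 10;
		while (cpus_still_running() && (wait || timeout--))
			udelay(1);
	}
}

int main(void)
{
	stop_other_cpus(/* wait */ 0, /* no_nmi_ipi */ false);
	printf("other CPUs stopped: %s\n",
	       cpus_still_running() ? "no" : "yes");
	return 0;
}

Running the model prints both IPI lines before "other CPUs stopped: yes", which mirrors the intended kernel behavior: the REBOOT attempt gets a bounded second, the NMI fallback still fires, and the hard wait is deferred to the very end.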