Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c  62
1 file changed, 52 insertions, 10 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e069987..9910744f0856 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
+			continue;
+
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
 			continue;
 
+		func = data->csd.func;			/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu.  In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
@@ -430,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * can't happen.
 	 */
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-		     && !oops_in_progress);
+		     && !oops_in_progress && !early_boot_irqs_disabled);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets an complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler.  In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
@@ -533,17 +572,20 @@ void ipi_call_unlock_irq(void)
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /*
- * Call a function on all processors
+ * Call a function on all processors.  May be used during early boot while
+ * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
  */
 int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
+	unsigned long flags;
 	int ret = 0;
 
 	preempt_disable();
 	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
+	local_irq_save(flags);
 	func(info);
-	local_irq_enable();
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
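
The core of the fix is the smp_wmb()/smp_rmb() pairing: the sender fills in func, info and the cpumask before publishing a non-zero refs, and the interrupt handler checks its bit in the cpumask before reading refs, the idea being that a handler only acts on an element whose data belongs to the current round of use. Below is a minimal user-space sketch of that publish/consume ordering using C11 atomics in place of the kernel barriers. It is an analogue, not kernel code: the names (fake_csd, sender, handler) and the single-bit "cpumask" are assumptions made for illustration, and the refs load is made acquire so the payload read is well-defined in the C11 model, whereas the kernel also relies on csd_lock()/csd_unlock() serialising reuse of the element.

/* Build with: cc -O2 -pthread ordering-sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the per-cpu call data; not the kernel layout. */
struct fake_csd {
	int info;		/* payload, like csd.info */
	atomic_int cpumask;	/* "my cpu bit", written before refs */
	atomic_int refs;	/* published last by the sender */
};

static struct fake_csd csd;

/* Sender side, like smp_call_function_many(): data and mask first, refs last. */
static void *sender(void *unused)
{
	(void)unused;
	csd.info = 42;
	atomic_store_explicit(&csd.cpumask, 1, memory_order_relaxed);
	/* Kernel: smp_wmb() between the cpumask write and the refs write. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&csd.refs, 1, memory_order_relaxed);
	return NULL;
}

/* Receiver side, like generic_smp_call_function_interrupt(). */
static void *handler(void *unused)
{
	(void)unused;
	for (;;) {
		/* Check the mask bit before refs, as the patch does. */
		if (!atomic_load_explicit(&csd.cpumask, memory_order_relaxed))
			continue;

		/* Kernel: smp_rmb() orders the cpumask read before the refs read. */
		atomic_thread_fence(memory_order_acquire);

		/*
		 * Acquire here so the info read below is ordered after the
		 * refs read in the C11 model (the kernel instead relies on
		 * csd_lock()/csd_unlock() around reuse of the element).
		 */
		if (atomic_load_explicit(&csd.refs, memory_order_acquire) == 0)
			continue;

		/* Both mask bit and refs observed set: info is the new value. */
		printf("handler saw info = %d\n", csd.info);
		return NULL;
	}
}

int main(void)
{
	pthread_t h, s;

	pthread_create(&h, NULL, handler, NULL);
	pthread_create(&s, NULL, sender, NULL);
	pthread_join(s, NULL);
	pthread_join(h, NULL);
	return 0;
}

Writing refs last is the point of the design: refs is what each handler decrements, so if it could become visible before the mask and payload, a cpu walking the list could act on a half-initialised element and drop refs to zero prematurely, which is exactly the reuse race the patch comments describe.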