Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--	kernel/irq_work.c	75
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf17ccf8..73288914ed5e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
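
The net effect of the hunk above: the local enqueue path (lazy vs. raised list, plus the conditional arch_irq_work_raise()) is factored out into __irq_work_queue_local(), and irq_work_queue_on() now routes the cpu == smp_processor_id() case through it instead of sending an IPI to itself; only genuinely remote CPUs still get arch_send_call_function_single_ipi(). For reference, a minimal caller-side sketch follows; the demo_* names are illustrative only and not part of this change, and it assumes the usual DEFINE_IRQ_WORK()/irq_work_queue_on() helpers from <linux/irq_work.h>.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Callback; runs from hard interrupt context once the IPI or tick fires. */
static void demo_irq_work_func(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(demo_work, demo_irq_work_func);

static void demo_kick(int cpu)
{
	/*
	 * Returns false if demo_work is still pending from an earlier call.
	 * With the change above, cpu == smp_processor_id() goes through
	 * __irq_work_queue_local() rather than a self-IPI.
	 */
	if (!irq_work_queue_on(&demo_work, cpu))
		pr_debug("demo_work already pending\n");
}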