Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 70
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1753486b440c..7eee98c38f25 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/task.h>
+#include <linux/sched/isolation.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/task_work.h>
 
@@ -217,7 +218,45 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	ret = chip->irq_set_affinity(data, mask, force);
+	/*
+	 * If this is a managed interrupt and housekeeping is enabled on
+	 * it check whether the requested affinity mask intersects with
+	 * a housekeeping CPU. If so, then remove the isolated CPUs from
+	 * the mask and just keep the housekeeping CPU(s). This prevents
+	 * the affinity setter from routing the interrupt to an isolated
+	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
+	 * interrupts on an isolated one.
+	 *
+	 * If the masks do not intersect or include online CPU(s) then
+	 * keep the requested mask. The isolated target CPUs are only
+	 * receiving interrupts when the I/O operation was submitted
+	 * directly from them.
+	 *
+	 * If all housekeeping CPUs in the affinity mask are offline, the
+	 * interrupt will be migrated by the CPU hotplug code once a
+	 * housekeeping CPU which belongs to the affinity mask comes
+	 * online.
+	 */
+	if (irqd_affinity_is_managed(data) &&
+	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
+		const struct cpumask *hk_mask, *prog_mask;
+
+		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+		static struct cpumask tmp_mask;
+
+		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+
+		raw_spin_lock(&tmp_mask_lock);
+		cpumask_and(&tmp_mask, mask, hk_mask);
+		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+			prog_mask = mask;
+		else
+			prog_mask = &tmp_mask;
+		ret = chip->irq_set_affinity(data, prog_mask, force);
+		raw_spin_unlock(&tmp_mask_lock);
+	} else {
+		ret = chip->irq_set_affinity(data, mask, force);
+	}
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
@@ -442,23 +481,9 @@ int irq_setup_affinity(struct irq_desc *desc)
 {
 	return irq_select_affinity(irq_desc_get_irq(desc));
 }
-#endif
-
-/*
- * Called when a bogus affinity is set via /proc/irq
- */
-int irq_select_affinity_usr(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-	int ret;
+#endif /* CONFIG_AUTO_IRQ_AFFINITY */
+#endif /* CONFIG_SMP */
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = irq_setup_affinity(desc);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return ret;
-}
-#endif
 
 /**
  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
@@ -692,6 +717,13 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
+ *
+ *	Note: irq enable/disable state is completely orthogonal
+ *	to the enable/disable state of irq wake. An irq can be
+ *	disabled with disable_irq() and still wake the system as
+ *	long as the irq has wake enabled. If this does not hold,
+ *	then the underlying irq chip and the related driver need
+ *	to be investigated.
  */
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
@@ -1500,8 +1532,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * has. The type flags are unreliable as the
 		 * underlying chip implementation can override them.
 		 */
-		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
-		       irq);
+		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
+		       new->name, irq);
 		ret = -EINVAL;
 		goto out_unlock;
 	}
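For readers unfamiliar with the cpumask plumbing in the irq_do_set_affinity() hunk above, the sketch below reduces the mask-selection logic to plain bitmasks. This is a user-space illustration only, not kernel code; the variable names (requested, housekeeping, online) and the example mask values are invented for the demo.

#include <stdio.h>

int main(void)
{
	unsigned long requested    = 0x0f; /* CPUs 0-3 in the requested affinity mask */
	unsigned long housekeeping = 0x05; /* CPUs 0 and 2 are housekeeping CPUs */
	unsigned long online       = 0x0e; /* CPUs 1-3 are online, CPU 0 is offline */
	unsigned long tmp, effective;

	/* Corresponds to cpumask_and(&tmp_mask, mask, hk_mask) above */
	tmp = requested & housekeeping;

	/*
	 * Mirror the kernel's decision: if no online housekeeping CPU is
	 * left, fall back to the full requested mask; otherwise program
	 * only the housekeeping subset.
	 */
	if (!(tmp & online))
		effective = requested;
	else
		effective = tmp;

	printf("effective mask: 0x%lx\n", effective); /* prints 0x5: CPUs 0 and 2 */
	return 0;
}

Note that the fallback tests the intersection against the online mask, not against emptiness alone: a mask that contains only offline housekeeping CPUs is still programmed, and the CPU hotplug code migrates the interrupt later, as the comment in the patch explains.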
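The new kernel-doc note on irq wake orthogonality is easiest to see in a driver's suspend path. The pair below is a minimal sketch of a hypothetical driver (foo_priv and the foo_suspend/foo_resume callbacks are invented for this example); it uses only the long-standing enable_irq_wake()/disable_irq_wake() wrappers around irq_set_irq_wake() together with disable_irq()/enable_irq(). The interrupt stays disabled for normal handling across suspend, yet remains armed as a system wakeup source, exactly the combination the note describes.

#include <linux/device.h>
#include <linux/interrupt.h>

struct foo_priv {
	int irq;	/* hypothetical per-device state */
};

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	disable_irq(priv->irq);		/* stop servicing the irq... */
	enable_irq_wake(priv->irq);	/* ...but keep it armed as a wake source */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	disable_irq_wake(priv->irq);	/* disarm the wake source */
	enable_irq(priv->irq);		/* resume normal irq handling */
	return 0;
}

If a disabled irq with wake enabled fails to wake the system, the note directs the investigation at the irq chip and the driver, not at the genirq core.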