diff options
Diffstat (limited to 'kernel/irq/msi.c')
| -rw-r--r-- | kernel/irq/msi.c | 64 | 
1 file changed, 56 insertions(+), 8 deletions(-)
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index edb987b2c58d..2f3c4f5382cc 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,  	return ret;  } +/* + * Carefully check whether the device can use reservation mode. If + * reservation mode is enabled then the early activation will assign a + * dummy vector to the device. If the PCI/MSI device does not support + * masking of the entry then this can result in spurious interrupts when + * the device driver is not absolutely careful. But even then a malfunction + * of the hardware could result in a spurious interrupt on the dummy vector + * and render the device unusable. If the entry can be masked then the core + * logic will prevent the spurious interrupt and reservation mode can be + * used. For now reservation mode is restricted to PCI/MSI. + */ +static bool msi_check_reservation_mode(struct irq_domain *domain, +				       struct msi_domain_info *info, +				       struct device *dev) +{ +	struct msi_desc *desc; + +	if (domain->bus_token != DOMAIN_BUS_PCI_MSI) +		return false; + +	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) +		return false; + +	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask) +		return false; + +	/* +	 * Checking the first MSI descriptor is sufficient. MSIX supports +	 * masking and MSI does so when the maskbit is set. 
+	 */ +	desc = first_msi_entry(dev); +	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; +} +  /**   * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain   * @domain:	The domain to allocate from @@ -353,9 +387,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,  {  	struct msi_domain_info *info = domain->host_data;  	struct msi_domain_ops *ops = info->ops; -	msi_alloc_info_t arg; +	struct irq_data *irq_data;  	struct msi_desc *desc; +	msi_alloc_info_t arg;  	int i, ret, virq; +	bool can_reserve;  	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);  	if (ret) @@ -385,6 +421,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,  	if (ops->msi_finish)  		ops->msi_finish(&arg, 0); +	can_reserve = msi_check_reservation_mode(domain, info, dev); +  	for_each_msi_entry(desc, dev) {  		virq = desc->irq;  		if (desc->nvec_used == 1) @@ -397,15 +435,25 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,  		 * the MSI entries before the PCI layer enables MSI in the  		 * card. Otherwise the card latches a random msi message.  		 */ -		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { -			struct irq_data *irq_data; +		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) +			continue; +		irq_data = irq_domain_get_irq_data(domain, desc->irq); +		if (!can_reserve) +			irqd_clr_can_reserve(irq_data); +		ret = irq_domain_activate_irq(irq_data, can_reserve); +		if (ret) +			goto cleanup; +	} + +	/* +	 * If these interrupts use reservation mode, clear the activated bit +	 * so request_irq() will assign the final vector. +	 */ +	if (can_reserve) { +		for_each_msi_entry(desc, dev) {  			irq_data = irq_domain_get_irq_data(domain, desc->irq); -			ret = irq_domain_activate_irq(irq_data, true); -			if (ret) -				goto cleanup; -			if (info->flags & MSI_FLAG_MUST_REACTIVATE) -				irqd_clr_activated(irq_data); +			irqd_clr_activated(irq_data);  		}  	}  	return 0;  |