| | | |
|---|---|---|
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/usb/host/xhci.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/usb/host/xhci.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/usb/host/xhci.c | 209 |
1 file changed, 8 insertions, 201 deletions
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6183ce8574b1..78790dc13c5f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/log2.h>
@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 {
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	struct iommu_domain *domain;
 	int err, i;
 	u64 val;
 	u32 intrs;
@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 	 * an iommu. Doing anything when there is no iommu is definitely
 	 * unsafe...
 	 */
-	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
+	domain = iommu_get_domain_for_dev(dev);
+	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
+	    domain->type == IOMMU_DOMAIN_IDENTITY)
 		return;
 
 	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -318,192 +322,6 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
 	return 0;
 }
 
-#ifdef CONFIG_USB_PCI
-/*
- * Set up MSI
- */
-static int xhci_setup_msi(struct xhci_hcd *xhci)
-{
-	int ret;
-	/*
-	 * TODO:Check with MSI Soc for sysdev
-	 */
-	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
-	if (ret < 0) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"failed to allocate MSI entry");
-		return ret;
-	}
-
-	ret = request_irq(pdev->irq, xhci_msi_irq,
-				0, "xhci_hcd", xhci_to_hcd(xhci));
-	if (ret) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"disable MSI interrupt");
-		pci_free_irq_vectors(pdev);
-	}
-
-	return ret;
-}
-
-/*
- * Set up MSI-X
- */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
-{
-	int i, ret;
-	struct usb_hcd *hcd = xhci_to_hcd(xhci);
-	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
-	/*
-	 * calculate number of msi-x vectors supported.
-	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
-	 *   with max number of interrupters based on the xhci HCSPARAMS1.
-	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
-	 *   Add additional 1 vector to ensure always available interrupt.
-	 */
-	xhci->msix_count = min(num_online_cpus() + 1,
-				HCS_MAX_INTRS(xhci->hcs_params1));
-
-	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
-			PCI_IRQ_MSIX);
-	if (ret < 0) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"Failed to enable MSI-X");
-		return ret;
-	}
-
-	for (i = 0; i < xhci->msix_count; i++) {
-		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
-				"xhci_hcd", xhci_to_hcd(xhci));
-		if (ret)
-			goto disable_msix;
-	}
-
-	hcd->msix_enabled = 1;
-	return ret;
-
-disable_msix:
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
-	while (--i >= 0)
-		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
-	pci_free_irq_vectors(pdev);
-	return ret;
-}
-
-/* Free any IRQs and disable MSI-X */
-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
-{
-	struct usb_hcd *hcd = xhci_to_hcd(xhci);
-	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
-	if (xhci->quirks & XHCI_PLAT)
-		return;
-
-	/* return if using legacy interrupt */
-	if (hcd->irq > 0)
-		return;
-
-	if (hcd->msix_enabled) {
-		int i;
-
-		for (i = 0; i < xhci->msix_count; i++)
-			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
-	} else {
-		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
-	}
-
-	pci_free_irq_vectors(pdev);
-	hcd->msix_enabled = 0;
-}
-
-static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
-{
-	struct usb_hcd *hcd = xhci_to_hcd(xhci);
-
-	if (hcd->msix_enabled) {
-		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-		int i;
-
-		for (i = 0; i < xhci->msix_count; i++)
-			synchronize_irq(pci_irq_vector(pdev, i));
-	}
-}
-
-static int xhci_try_enable_msi(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct pci_dev  *pdev;
-	int ret;
-
-	/* The xhci platform device has set up IRQs through usb_add_hcd. */
-	if (xhci->quirks & XHCI_PLAT)
-		return 0;
-
-	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-	/*
-	 * Some Fresco Logic host controllers advertise MSI, but fail to
-	 * generate interrupts.  Don't even try to enable MSI.
-	 */
-	if (xhci->quirks & XHCI_BROKEN_MSI)
-		goto legacy_irq;
-
-	/* unregister the legacy interrupt */
-	if (hcd->irq)
-		free_irq(hcd->irq, hcd);
-	hcd->irq = 0;
-
-	ret = xhci_setup_msix(xhci);
-	if (ret)
-		/* fall back to msi*/
-		ret = xhci_setup_msi(xhci);
-
-	if (!ret) {
-		hcd->msi_enabled = 1;
-		return 0;
-	}
-
-	if (!pdev->irq) {
-		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
-		return -EINVAL;
-	}
-
- legacy_irq:
-	if (!strlen(hcd->irq_descr))
-		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
-			 hcd->driver->description, hcd->self.busnum);
-
-	/* fall back to legacy interrupt*/
-	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-			hcd->irq_descr, hcd);
-	if (ret) {
-		xhci_err(xhci, "request interrupt %d failed\n",
-				pdev->irq);
-		return ret;
-	}
-	hcd->irq = pdev->irq;
-	return 0;
-}
-
-#else
-
-static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
-{
-	return 0;
-}
-
-static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
-{
-}
-
-static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
-{
-}
-
-#endif
-
 static void compliance_mode_recovery(struct timer_list *t)
 {
 	struct xhci_hcd *xhci;
@@ -701,10 +519,6 @@ int xhci_run(struct usb_hcd *hcd)
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
 
-	ret = xhci_try_enable_msi(hcd);
-	if (ret)
-		return ret;
-
 	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -754,7 +568,7 @@ EXPORT_SYMBOL_GPL(xhci_run);
  * Disable device contexts, disable IRQs, and quiesce the HC.
  * Reset the HC, finish any completed transactions, and cleanup memory.
  */
-static void xhci_stop(struct usb_hcd *hcd)
+void xhci_stop(struct usb_hcd *hcd)
 {
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -777,8 +591,6 @@ static void xhci_stop(struct usb_hcd *hcd)
 	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
-	xhci_cleanup_msix(xhci);
-
 	/* Deleting Compliance Mode Recovery Timer */
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 			(!(xhci_all_ports_seen_u0(xhci)))) {
@@ -805,6 +617,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 			readl(&xhci->op_regs->status));
 	mutex_unlock(&xhci->mutex);
 }
+EXPORT_SYMBOL_GPL(xhci_stop);
 
 /*
  * Shutdown HC (not bus-specific)
@@ -846,8 +659,6 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_unlock_irq(&xhci->lock);
 
-	xhci_cleanup_msix(xhci);
-
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"xhci_shutdown completed - status = %x",
 			readl(&xhci->op_regs->status));
@@ -1139,10 +950,6 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 				__func__);
 	}
 
-	/* step 5: remove core well power */
-	/* synchronize irq when using MSI-X */
-	xhci_msix_sync_irqs(xhci);
-
 	return rc;
 }
 EXPORT_SYMBOL_GPL(xhci_suspend);
@@ -1246,7 +1053,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		spin_unlock_irq(&xhci->lock);
 		if (retval)
 			return retval;
-		xhci_cleanup_msix(xhci);
 
 		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 		temp = readl(&xhci->op_regs->status);
@@ -4438,6 +4244,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
 
 	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, command);
 		return 0;
 	}
 
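One functional change folded into this merge is the tightened guard in xhci_zero_64b_regs(): instead of only asking device_iommu_mapped(), the quirk now also bails out when the device's IOMMU domain is identity-mapped (passthrough), since in that case a write through a stale DMA address would still reach physical memory. Below is a minimal sketch of that check pattern; the helper name is hypothetical and only for illustration, it is not part of the driver.

```c
#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical helper (not in the kernel tree): returns true only when
 * @dev sits behind a translating IOMMU domain. This mirrors the check
 * added to xhci_zero_64b_regs(): with no IOMMU, or with an identity
 * (passthrough) domain, zeroing stale 64-bit DMA registers would not be
 * contained by the IOMMU, so the quirk must be skipped.
 */
static bool dev_behind_translating_iommu(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return domain && domain->type != IOMMU_DOMAIN_IDENTITY;
}
```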