Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                431
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c               7
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c      1
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c       10
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c           1246
5 files changed, 745 insertions(+), 950 deletions(-)
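Most of the pcie-qcom.c churn in the diff below comes from dropping the per-handle clock and reset management (one devm_clk_get()/devm_reset_control_get_exclusive() and one goto-unwind label per line) in favour of the kernel's bulk APIs, which also collapse the long error-handling ladders into single calls. The following is only an abridged sketch of that pattern, not code from the patch; my_res, my_get_resources and my_init are placeholder names, and the clock/reset IDs are examples.

	/* Illustrative sketch of the clk/reset bulk pattern; placeholder names. */
	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/reset.h>

	#define MY_NUM_CLKS	4
	#define MY_NUM_RESETS	2

	struct my_res {
		struct clk_bulk_data clks[MY_NUM_CLKS];
		struct reset_control_bulk_data resets[MY_NUM_RESETS];
		int num_resets;
	};

	static int my_get_resources(struct device *dev, struct my_res *res)
	{
		int ret;

		res->clks[0].id = "iface";
		res->clks[1].id = "aux";
		res->clks[2].id = "bus_master";
		res->clks[3].id = "bus_slave";

		/* One devm call fetches every clock by its .id */
		ret = devm_clk_bulk_get(dev, MY_NUM_CLKS, res->clks);
		if (ret < 0)
			return ret;

		res->resets[0].id = "pci";
		res->resets[1].id = "ahb";
		res->num_resets = MY_NUM_RESETS;

		/* Likewise for the reset lines */
		return devm_reset_control_bulk_get_exclusive(dev, res->num_resets,
							     res->resets);
	}

	static int my_init(struct device *dev, struct my_res *res)
	{
		int ret;

		/* Assert and deassert all resets in one call each */
		ret = reset_control_bulk_assert(res->num_resets, res->resets);
		if (ret)
			return ret;

		ret = reset_control_bulk_deassert(res->num_resets, res->resets);
		if (ret)
			return ret;

		/* Enable every clock; a single error path unwinds them all */
		ret = clk_bulk_prepare_enable(MY_NUM_CLKS, res->clks);
		if (ret)
			reset_control_bulk_assert(res->num_resets, res->resets);

		return ret;
	}

The driver variants that need only a subset of the declared resets (for example the non-IPQ 2.4.0 and non-APQ 2.1.0 platforms in the diff) simply pass a smaller num_resets covering the leading entries of the array, as the patch does with res->num_resets.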
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 434f6a4f4041..ab96da43e0c2 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -1,6 +1,6 @@  # SPDX-License-Identifier: GPL-2.0 -menu "DesignWare PCI Core Support" +menu "DesignWare-based PCIe controllers"  	depends on PCI  config PCIE_DW @@ -14,88 +14,67 @@ config PCIE_DW_EP  	bool  	select PCIE_DW -config PCI_DRA7XX -	tristate - -config PCI_DRA7XX_HOST -	tristate "TI DRA7xx PCIe controller Host Mode" -	depends on SOC_DRA7XX || COMPILE_TEST -	depends on OF && HAS_IOMEM && TI_PIPE3 +config PCIE_AL +	bool "Amazon Annapurna Labs PCIe controller" +	depends on OF && (ARM64 || COMPILE_TEST)  	depends on PCI_MSI  	select PCIE_DW_HOST -	select PCI_DRA7XX -	default y if SOC_DRA7XX +	select PCI_ECAM  	help -	  Enables support for the PCIe controller in the DRA7xx SoC to work in -	  host mode. There are two instances of PCIe controller in DRA7xx. -	  This controller can work either as EP or RC. In order to enable -	  host-specific features PCI_DRA7XX_HOST must be selected and in order -	  to enable device-specific features PCI_DRA7XX_EP must be selected. -	  This uses the DesignWare core. +	  Say Y here to enable support of the Amazon's Annapurna Labs PCIe +	  controller IP on Amazon SoCs. The PCIe controller uses the DesignWare +	  core plus Annapurna Labs proprietary hardware wrappers. This is +	  required only for DT-based platforms. ACPI platforms with the +	  Annapurna Labs PCIe controller don't need to enable this. -config PCI_DRA7XX_EP -	tristate "TI DRA7xx PCIe controller Endpoint Mode" -	depends on SOC_DRA7XX || COMPILE_TEST -	depends on OF && HAS_IOMEM && TI_PIPE3 -	depends on PCI_ENDPOINT -	select PCIE_DW_EP -	select PCI_DRA7XX +config PCI_MESON +	tristate "Amlogic Meson PCIe controller" +	default m if ARCH_MESON +	depends on PCI_MSI +	select PCIE_DW_HOST  	help -	  Enables support for the PCIe controller in the DRA7xx SoC to work in -	  endpoint mode. There are two instances of PCIe controller in DRA7xx. -	  This controller can work either as EP or RC. In order to enable -	  host-specific features PCI_DRA7XX_HOST must be selected and in order -	  to enable device-specific features PCI_DRA7XX_EP must be selected. -	  This uses the DesignWare core. +	  Say Y here if you want to enable PCI controller support on Amlogic +	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware +	  and therefore the driver re-uses the DesignWare core functions to +	  implement the driver. -config PCIE_DW_PLAT +config PCIE_ARTPEC6  	bool -config PCIE_DW_PLAT_HOST -	bool "Platform bus based DesignWare PCIe Controller - Host mode" +config PCIE_ARTPEC6_HOST +	bool "Axis ARTPEC-6 PCIe controller (host mode)" +	depends on MACH_ARTPEC6 || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST -	select PCIE_DW_PLAT +	select PCIE_ARTPEC6  	help -	  Enables support for the PCIe controller in the Designware IP to -	  work in host mode. There are two instances of PCIe controller in -	  Designware IP. -	  This controller can work either as EP or RC. In order to enable -	  host-specific features PCIE_DW_PLAT_HOST must be selected and in -	  order to enable device-specific features PCI_DW_PLAT_EP must be -	  selected. +	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in +	  host mode. This uses the DesignWare core. 
-config PCIE_DW_PLAT_EP -	bool "Platform bus based DesignWare PCIe Controller - Endpoint mode" -	depends on PCI && PCI_MSI +config PCIE_ARTPEC6_EP +	bool "Axis ARTPEC-6 PCIe controller (endpoint mode)" +	depends on MACH_ARTPEC6 || COMPILE_TEST  	depends on PCI_ENDPOINT  	select PCIE_DW_EP -	select PCIE_DW_PLAT +	select PCIE_ARTPEC6  	help -	  Enables support for the PCIe controller in the Designware IP to -	  work in endpoint mode. There are two instances of PCIe controller -	  in Designware IP. -	  This controller can work either as EP or RC. In order to enable -	  host-specific features PCIE_DW_PLAT_HOST must be selected and in -	  order to enable device-specific features PCI_DW_PLAT_EP must be -	  selected. +	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in +	  endpoint mode. This uses the DesignWare core. -config PCI_EXYNOS -	tristate "Samsung Exynos PCIe controller" -	depends on ARCH_EXYNOS || COMPILE_TEST +config PCIE_BT1 +	tristate "Baikal-T1 PCIe controller" +	depends on MIPS_BAIKAL_T1 || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST  	help -	  Enables support for the PCIe controller in the Samsung Exynos SoCs -	  to work in host mode. The PCI controller is based on the DesignWare -	  hardware and therefore the driver re-uses the DesignWare core -	  functions to implement the driver. +	  Enables support for the PCIe controller in the Baikal-T1 SoC to work +	  in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.  config PCI_IMX6  	bool  config PCI_IMX6_HOST -	bool "Freescale i.MX6/7/8 PCIe controller host mode" +	bool "Freescale i.MX6/7/8 PCIe controller (host mode)"  	depends on ARCH_MXC || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST @@ -107,7 +86,7 @@ config PCI_IMX6_HOST  	  DesignWare core functions to implement the driver.  config PCI_IMX6_EP -	bool "Freescale i.MX6/7/8 PCIe controller endpoint mode" +	bool "Freescale i.MX6/7/8 PCIe controller (endpoint mode)"  	depends on ARCH_MXC || COMPILE_TEST  	depends on PCI_ENDPOINT  	select PCIE_DW_EP @@ -118,43 +97,8 @@ config PCI_IMX6_EP  	  on DesignWare hardware and therefore the driver re-uses the  	  DesignWare core functions to implement the driver. -config PCIE_SPEAR13XX -	bool "STMicroelectronics SPEAr PCIe controller" -	depends on ARCH_SPEAR13XX || COMPILE_TEST -	depends on PCI_MSI -	select PCIE_DW_HOST -	help -	  Say Y here if you want PCIe support on SPEAr13XX SoCs. - -config PCI_KEYSTONE -	bool - -config PCI_KEYSTONE_HOST -	bool "PCI Keystone Host Mode" -	depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST -	depends on PCI_MSI -	select PCIE_DW_HOST -	select PCI_KEYSTONE -	help -	  Enables support for the PCIe controller in the Keystone SoC to -	  work in host mode. The PCI controller on Keystone is based on -	  DesignWare hardware and therefore the driver re-uses the -	  DesignWare core functions to implement the driver. - -config PCI_KEYSTONE_EP -	bool "PCI Keystone Endpoint Mode" -	depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST -	depends on PCI_ENDPOINT -	select PCIE_DW_EP -	select PCI_KEYSTONE -	help -	  Enables support for the PCIe controller in the Keystone SoC to -	  work in endpoint mode. The PCI controller on Keystone is based -	  on DesignWare hardware and therefore the driver re-uses the -	  DesignWare core functions to implement the driver. 
-  config PCI_LAYERSCAPE -	bool "Freescale Layerscape PCIe controller - Host mode" +	bool "Freescale Layerscape PCIe controller (host mode)"  	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)  	depends on PCI_MSI  	select PCIE_DW_HOST @@ -167,7 +111,7 @@ config PCI_LAYERSCAPE  	  controller works in RC mode.  config PCI_LAYERSCAPE_EP -	bool "Freescale Layerscape PCIe controller - Endpoint mode" +	bool "Freescale Layerscape PCIe controller (endpoint mode)"  	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)  	depends on PCI_ENDPOINT  	select PCIE_DW_EP @@ -180,7 +124,7 @@ config PCI_LAYERSCAPE_EP  config PCI_HISI  	depends on OF && (ARM64 || COMPILE_TEST) -	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" +	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controller"  	depends on PCI_MSI  	select PCIE_DW_HOST  	select PCI_HOST_COMMON @@ -188,83 +132,26 @@ config PCI_HISI  	  Say Y here if you want PCIe controller support on HiSilicon  	  Hip05 and Hip06 SoCs -config PCIE_QCOM -	bool "Qualcomm PCIe controller" -	depends on OF && (ARCH_QCOM || COMPILE_TEST) -	depends on PCI_MSI -	select PCIE_DW_HOST -	select CRC8 -	help -	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The -	  PCIe controller uses the DesignWare core plus Qualcomm-specific -	  hardware wrappers. - -config PCIE_QCOM_EP -	tristate "Qualcomm PCIe controller - Endpoint mode" -	depends on OF && (ARCH_QCOM || COMPILE_TEST) -	depends on PCI_ENDPOINT -	select PCIE_DW_EP -	help -	  Say Y here to enable support for the PCIe controllers on Qualcomm SoCs -	  to work in endpoint mode. The PCIe controller uses the DesignWare core -	  plus Qualcomm-specific hardware wrappers. - -config PCIE_ARMADA_8K -	bool "Marvell Armada-8K PCIe controller" -	depends on ARCH_MVEBU || COMPILE_TEST -	depends on PCI_MSI -	select PCIE_DW_HOST -	help -	  Say Y here if you want to enable PCIe controller support on -	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on -	  DesignWare hardware and therefore the driver re-uses the -	  DesignWare core functions to implement the driver. - -config PCIE_ARTPEC6 -	bool - -config PCIE_ARTPEC6_HOST -	bool "Axis ARTPEC-6 PCIe controller Host Mode" -	depends on MACH_ARTPEC6 || COMPILE_TEST +config PCIE_KIRIN +	depends on OF && (ARM64 || COMPILE_TEST) +	tristate "HiSilicon Kirin PCIe controller"  	depends on PCI_MSI  	select PCIE_DW_HOST -	select PCIE_ARTPEC6 -	help -	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in -	  host mode. This uses the DesignWare core. - -config PCIE_ARTPEC6_EP -	bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" -	depends on MACH_ARTPEC6 || COMPILE_TEST -	depends on PCI_ENDPOINT -	select PCIE_DW_EP -	select PCIE_ARTPEC6 +	select REGMAP_MMIO  	help -	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in -	  endpoint mode. This uses the DesignWare core. +	  Say Y here if you want PCIe controller support +	  on HiSilicon Kirin series SoCs. -config PCIE_BT1 -	tristate "Baikal-T1 PCIe controller" -	depends on MIPS_BAIKAL_T1 || COMPILE_TEST +config PCIE_HISI_STB +	bool "HiSilicon STB PCIe controller" +	depends on ARCH_HISI || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST  	help -	  Enables support for the PCIe controller in the Baikal-T1 SoC to work -	  in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core. 
- -config PCIE_ROCKCHIP_DW_HOST -	bool "Rockchip DesignWare PCIe controller" -	select PCIE_DW -	select PCIE_DW_HOST -	depends on PCI_MSI -	depends on ARCH_ROCKCHIP || COMPILE_TEST -	depends on OF -	help -	  Enables support for the DesignWare PCIe controller in the -	  Rockchip SoC except RK3399. +	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs  config PCIE_INTEL_GW -	bool "Intel Gateway PCIe host controller support" +	bool "Intel Gateway PCIe controller "  	depends on OF && (X86 || COMPILE_TEST)  	depends on PCI_MSI  	select PCIE_DW_HOST @@ -278,7 +165,7 @@ config PCIE_KEEMBAY  	bool  config PCIE_KEEMBAY_HOST -	bool "Intel Keem Bay PCIe controller - Host mode" +	bool "Intel Keem Bay PCIe controller (host mode)"  	depends on ARCH_KEEMBAY || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST @@ -290,7 +177,7 @@ config PCIE_KEEMBAY_HOST  	  DesignWare core functions.  config PCIE_KEEMBAY_EP -	bool "Intel Keem Bay PCIe controller - Endpoint mode" +	bool "Intel Keem Bay PCIe controller (endpoint mode)"  	depends on ARCH_KEEMBAY || COMPILE_TEST  	depends on PCI_MSI  	depends on PCI_ENDPOINT @@ -302,39 +189,22 @@ config PCIE_KEEMBAY_EP  	  The PCIe controller is based on DesignWare Hardware and uses  	  DesignWare core functions. -config PCIE_KIRIN -	depends on OF && (ARM64 || COMPILE_TEST) -	tristate "HiSilicon Kirin series SoCs PCIe controllers" -	depends on PCI_MSI -	select PCIE_DW_HOST -	help -	  Say Y here if you want PCIe controller support -	  on HiSilicon Kirin series SoCs. - -config PCIE_HISI_STB -	bool "HiSilicon STB SoCs PCIe controllers" -	depends on ARCH_HISI || COMPILE_TEST -	depends on PCI_MSI -	select PCIE_DW_HOST -	help -	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs - -config PCI_MESON -	tristate "MESON PCIe controller" -	default m if ARCH_MESON +config PCIE_ARMADA_8K +	bool "Marvell Armada-8K PCIe controller" +	depends on ARCH_MVEBU || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST  	help -	  Say Y here if you want to enable PCI controller support on Amlogic -	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware -	  and therefore the driver re-uses the DesignWare core functions to -	  implement the driver. +	  Say Y here if you want to enable PCIe controller support on +	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on +	  DesignWare hardware and therefore the driver re-uses the +	  DesignWare core functions to implement the driver.  config PCIE_TEGRA194  	tristate  config PCIE_TEGRA194_HOST -	tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode" +	tristate "NVIDIA Tegra194 (and later) PCIe controller (host mode)"  	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST @@ -349,7 +219,7 @@ config PCIE_TEGRA194_HOST  	  selected. This uses the DesignWare core.  config PCIE_TEGRA194_EP -	tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode" +	tristate "NVIDIA Tegra194 (and later) PCIe controller (endpoint mode)"  	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST  	depends on PCI_ENDPOINT  	select PCIE_DW_EP @@ -363,17 +233,92 @@ config PCIE_TEGRA194_EP  	  in order to enable device-specific features PCIE_TEGRA194_EP must be  	  selected. This uses the DesignWare core. 
-config PCIE_VISCONTI_HOST -	bool "Toshiba Visconti PCIe controllers" -	depends on ARCH_VISCONTI || COMPILE_TEST +config PCIE_DW_PLAT +	bool + +config PCIE_DW_PLAT_HOST +	bool "Platform bus based DesignWare PCIe controller (host mode)"  	depends on PCI_MSI  	select PCIE_DW_HOST +	select PCIE_DW_PLAT  	help -	  Say Y here if you want PCIe controller support on Toshiba Visconti SoC. -	  This driver supports TMPV7708 SoC. +	  Enables support for the PCIe controller in the Designware IP to +	  work in host mode. There are two instances of PCIe controller in +	  Designware IP. +	  This controller can work either as EP or RC. In order to enable +	  host-specific features PCIE_DW_PLAT_HOST must be selected and in +	  order to enable device-specific features PCI_DW_PLAT_EP must be +	  selected. + +config PCIE_DW_PLAT_EP +	bool "Platform bus based DesignWare PCIe controller (endpoint mode)" +	depends on PCI && PCI_MSI +	depends on PCI_ENDPOINT +	select PCIE_DW_EP +	select PCIE_DW_PLAT +	help +	  Enables support for the PCIe controller in the Designware IP to +	  work in endpoint mode. There are two instances of PCIe controller +	  in Designware IP. +	  This controller can work either as EP or RC. In order to enable +	  host-specific features PCIE_DW_PLAT_HOST must be selected and in +	  order to enable device-specific features PCI_DW_PLAT_EP must be +	  selected. + +config PCIE_QCOM +	bool "Qualcomm PCIe controller (host mode)" +	depends on OF && (ARCH_QCOM || COMPILE_TEST) +	depends on PCI_MSI +	select PCIE_DW_HOST +	select CRC8 +	help +	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The +	  PCIe controller uses the DesignWare core plus Qualcomm-specific +	  hardware wrappers. + +config PCIE_QCOM_EP +	tristate "Qualcomm PCIe controller (endpoint mode)" +	depends on OF && (ARCH_QCOM || COMPILE_TEST) +	depends on PCI_ENDPOINT +	select PCIE_DW_EP +	help +	  Say Y here to enable support for the PCIe controllers on Qualcomm SoCs +	  to work in endpoint mode. The PCIe controller uses the DesignWare core +	  plus Qualcomm-specific hardware wrappers. + +config PCIE_ROCKCHIP_DW_HOST +	bool "Rockchip DesignWare PCIe controller" +	select PCIE_DW +	select PCIE_DW_HOST +	depends on PCI_MSI +	depends on ARCH_ROCKCHIP || COMPILE_TEST +	depends on OF +	help +	  Enables support for the DesignWare PCIe controller in the +	  Rockchip SoC except RK3399. + +config PCI_EXYNOS +	tristate "Samsung Exynos PCIe controller" +	depends on ARCH_EXYNOS || COMPILE_TEST +	depends on PCI_MSI +	select PCIE_DW_HOST +	help +	  Enables support for the PCIe controller in the Samsung Exynos SoCs +	  to work in host mode. The PCI controller is based on the DesignWare +	  hardware and therefore the driver re-uses the DesignWare core +	  functions to implement the driver. + +config PCIE_FU740 +	bool "SiFive FU740 PCIe controller" +	depends on PCI_MSI +	depends on SOC_SIFIVE || COMPILE_TEST +	select PCIE_DW_HOST +	help +	  Say Y here if you want PCIe controller support for the SiFive +	  FU740.  config PCIE_UNIPHIER -	bool "Socionext UniPhier PCIe host controllers" +	bool "Socionext UniPhier PCIe controller (host mode)"  	depends on ARCH_UNIPHIER || COMPILE_TEST  	depends on OF && HAS_IOMEM  	depends on PCI_MSI @@ -383,7 +328,7 @@ config PCIE_UNIPHIER  	  This driver supports LD20 and PXs3 SoCs.  
config PCIE_UNIPHIER_EP -	bool "Socionext UniPhier PCIe endpoint controllers" +	bool "Socionext UniPhier PCIe controller (endpoint mode)"  	depends on ARCH_UNIPHIER || COMPILE_TEST  	depends on OF && HAS_IOMEM  	depends on PCI_ENDPOINT @@ -392,26 +337,82 @@ config PCIE_UNIPHIER_EP  	  Say Y here if you want PCIe endpoint controller support on  	  UniPhier SoCs. This driver supports Pro5 SoC. -config PCIE_AL -	bool "Amazon Annapurna Labs PCIe controller" -	depends on OF && (ARM64 || COMPILE_TEST) +config PCIE_SPEAR13XX +	bool "STMicroelectronics SPEAr PCIe controller" +	depends on ARCH_SPEAR13XX || COMPILE_TEST  	depends on PCI_MSI  	select PCIE_DW_HOST -	select PCI_ECAM  	help -	  Say Y here to enable support of the Amazon's Annapurna Labs PCIe -	  controller IP on Amazon SoCs. The PCIe controller uses the DesignWare -	  core plus Annapurna Labs proprietary hardware wrappers. This is -	  required only for DT-based platforms. ACPI platforms with the -	  Annapurna Labs PCIe controller don't need to enable this. +	  Say Y here if you want PCIe support on SPEAr13XX SoCs. -config PCIE_FU740 -	bool "SiFive FU740 PCIe host controller" +config PCI_DRA7XX +	tristate + +config PCI_DRA7XX_HOST +	tristate "TI DRA7xx PCIe controller (host mode)" +	depends on SOC_DRA7XX || COMPILE_TEST +	depends on OF && HAS_IOMEM && TI_PIPE3  	depends on PCI_MSI -	depends on SOC_SIFIVE || COMPILE_TEST  	select PCIE_DW_HOST +	select PCI_DRA7XX +	default y if SOC_DRA7XX  	help -	  Say Y here if you want PCIe controller support for the SiFive -	  FU740. +	  Enables support for the PCIe controller in the DRA7xx SoC to work in +	  host mode. There are two instances of PCIe controller in DRA7xx. +	  This controller can work either as EP or RC. In order to enable +	  host-specific features PCI_DRA7XX_HOST must be selected and in order +	  to enable device-specific features PCI_DRA7XX_EP must be selected. +	  This uses the DesignWare core. + +config PCI_DRA7XX_EP +	tristate "TI DRA7xx PCIe controller (endpoint mode)" +	depends on SOC_DRA7XX || COMPILE_TEST +	depends on OF && HAS_IOMEM && TI_PIPE3 +	depends on PCI_ENDPOINT +	select PCIE_DW_EP +	select PCI_DRA7XX +	help +	  Enables support for the PCIe controller in the DRA7xx SoC to work in +	  endpoint mode. There are two instances of PCIe controller in DRA7xx. +	  This controller can work either as EP or RC. In order to enable +	  host-specific features PCI_DRA7XX_HOST must be selected and in order +	  to enable device-specific features PCI_DRA7XX_EP must be selected. +	  This uses the DesignWare core. + +config PCI_KEYSTONE +	bool + +config PCI_KEYSTONE_HOST +	bool "TI Keystone PCIe controller (host mode)" +	depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST +	depends on PCI_MSI +	select PCIE_DW_HOST +	select PCI_KEYSTONE +	help +	  Enables support for the PCIe controller in the Keystone SoC to +	  work in host mode. The PCI controller on Keystone is based on +	  DesignWare hardware and therefore the driver re-uses the +	  DesignWare core functions to implement the driver. + +config PCI_KEYSTONE_EP +	bool "TI Keystone PCIe controller (endpoint mode)" +	depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST +	depends on PCI_ENDPOINT +	select PCIE_DW_EP +	select PCI_KEYSTONE +	help +	  Enables support for the PCIe controller in the Keystone SoC to +	  work in endpoint mode. The PCI controller on Keystone is based +	  on DesignWare hardware and therefore the driver re-uses the +	  DesignWare core functions to implement the driver. 
+ +config PCIE_VISCONTI_HOST +	bool "Toshiba Visconti PCIe controller" +	depends on ARCH_VISCONTI || COMPILE_TEST +	depends on PCI_MSI +	select PCIE_DW_HOST +	help +	  Say Y here if you want PCIe controller support on Toshiba Visconti SoC. +	  This driver supports TMPV7708 SoC.  endmenu diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 55a0405b921d..52906f999f2b 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -1566,6 +1566,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,  static int __init imx6_pcie_init(void)  {  #ifdef CONFIG_ARM +	struct device_node *np; + +	np = of_find_matching_node(NULL, imx6_pcie_of_match); +	if (!np) +		return -ENODEV; +	of_node_put(np); +  	/*  	 * Since probe() can be deferred we need to make sure that  	 * hook_fault_code is not called after __init memory is freed diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index ad99707b3b99..c640db60edc6 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -110,6 +110,7 @@ static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {  };  static const struct of_device_id ls_pcie_ep_of_match[] = { +	{ .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },  	{ .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },  	{ .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },  	{ .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata }, diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 53a16b8b6ac2..8e33e6e59e68 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -1001,11 +1001,6 @@ void dw_pcie_setup(struct dw_pcie *pci)  		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);  	} -	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); -	val &= ~PORT_LINK_FAST_LINK_MODE; -	val |= PORT_LINK_DLL_LINK_EN; -	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); -  	if (dw_pcie_cap_is(pci, CDM_CHECK)) {  		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);  		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | @@ -1013,6 +1008,11 @@ void dw_pcie_setup(struct dw_pcie *pci)  		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);  	} +	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); +	val &= ~PORT_LINK_FAST_LINK_MODE; +	val |= PORT_LINK_DLL_LINK_EN; +	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); +  	if (!pci->num_lanes) {  		dev_dbg(pci->dev, "Using h/w default number of lanes\n");  		return; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index a232b04af048..4ab30892f6ef 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -10,6 +10,7 @@  #include <linux/clk.h>  #include <linux/crc8.h> +#include <linux/debugfs.h>  #include <linux/delay.h>  #include <linux/gpio/consumer.h>  #include <linux/interconnect.h> @@ -33,7 +34,44 @@  #include "../../pci.h"  #include "pcie-designware.h" -#define PCIE20_PARF_SYS_CTRL			0x00 +/* PARF registers */ +#define PARF_SYS_CTRL				0x00 +#define PARF_PM_CTRL				0x20 +#define PARF_PCS_DEEMPH				0x34 +#define PARF_PCS_SWING				0x38 +#define PARF_PHY_CTRL				0x40 +#define PARF_PHY_REFCLK				0x4c +#define PARF_CONFIG_BITS			0x50 +#define PARF_DBI_BASE_ADDR			0x168 +#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		
0x16c /* Register offset specific to IP ver 2.3.3 */ +#define PARF_MHI_CLOCK_RESET_CTRL		0x174 +#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178 +#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8 +#define PARF_Q2A_FLUSH				0x1ac +#define PARF_LTSSM				0x1b0 +#define PARF_SID_OFFSET				0x234 +#define PARF_BDF_TRANSLATE_CFG			0x24c +#define PARF_SLV_ADDR_SPACE_SIZE		0x358 +#define PARF_DEVICE_TYPE			0x1000 +#define PARF_BDF_TO_SID_TABLE_N			0x2000 + +/* ELBI registers */ +#define ELBI_SYS_CTRL				0x04 + +/* DBI registers */ +#define AXI_MSTR_RESP_COMP_CTRL0		0x818 +#define AXI_MSTR_RESP_COMP_CTRL1		0x81c +#define MISC_CONTROL_1_REG			0x8bc + +/* MHI registers */ +#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04 +#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c +#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10 +#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84 +#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88 + +/* PARF_SYS_CTRL register fields */ +#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)  #define MST_WAKEUP_EN				BIT(13)  #define SLV_WAKEUP_EN				BIT(12)  #define MSTR_ACLK_CGC_DIS			BIT(10) @@ -43,45 +81,63 @@  #define L23_CLK_RMV_DIS				BIT(2)  #define L1_CLK_RMV_DIS				BIT(1) -#define PCIE20_PARF_PM_CTRL			0x20 +/* PARF_PM_CTRL register fields */  #define REQ_NOT_ENTR_L1				BIT(5) -#define PCIE20_PARF_PHY_CTRL			0x40 +/* PARF_PCS_DEEMPH register fields */ +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x) + +/* PARF_PCS_SWING register fields */ +#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x) +#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x) + +/* PARF_PHY_CTRL register fields */  #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16) -#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x) +#define PHY_TEST_PWR_DOWN			BIT(0) -#define PCIE20_PARF_PHY_REFCLK			0x4C +/* PARF_PHY_REFCLK register fields */  #define PHY_REFCLK_SSP_EN			BIT(16)  #define PHY_REFCLK_USE_PAD			BIT(12) -#define PCIE20_PARF_DBI_BASE_ADDR		0x168 -#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C -#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174 +/* PARF_CONFIG_BITS register fields */ +#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x) + +/* PARF_SLV_ADDR_SPACE_SIZE register value */ +#define SLV_ADDR_SPACE_SZ			0x10000000 + +/* PARF_MHI_CLOCK_RESET_CTRL register fields */  #define AHB_CLK_EN				BIT(0)  #define MSTR_AXI_CLK_EN				BIT(1)  #define BYPASS					BIT(4) -#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178 -#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8 -#define PCIE20_PARF_LTSSM			0x1B0 -#define PCIE20_PARF_SID_OFFSET			0x234 -#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C -#define PCIE20_PARF_DEVICE_TYPE			0x1000 -#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000 +/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */ +#define EN					BIT(31) + +/* PARF_LTSSM register fields */ +#define LTSSM_EN				BIT(8) -#define PCIE20_ELBI_SYS_CTRL			0x04 -#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0) +/* PARF_DEVICE_TYPE register fields */ +#define DEVICE_TYPE_RC				0x4 + +/* ELBI_SYS_CTRL register fields */ +#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0) -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818 +/* AXI_MSTR_RESP_COMP_CTRL0 register fields */  #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4  #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5 -#define 
PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c + +/* AXI_MSTR_RESP_COMP_CTRL1 register fields */  #define CFG_BRIDGE_SB_INIT			BIT(0) -#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \ -						250) -#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \ -						1) +/* MISC_CONTROL_1_REG register fields */ +#define DBI_RO_WR_EN				1 + +/* PCI_EXP_SLTCAP register fields */ +#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) +#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)  #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \  						PCI_EXP_SLTCAP_PCP | \  						PCI_EXP_SLTCAP_MRLSP | \ @@ -93,103 +149,62 @@  						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \  						PCIE_CAP_SLOT_POWER_LIMIT_SCALE) -#define PCIE20_PARF_Q2A_FLUSH			0x1AC - -#define PCIE20_MISC_CONTROL_1_REG		0x8BC -#define DBI_RO_WR_EN				1 -  #define PERST_DELAY_US				1000 -/* PARF registers */ -#define PCIE20_PARF_PCS_DEEMPH			0x34 -#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16) -#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8) -#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0) -#define PCIE20_PARF_PCS_SWING			0x38 -#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8) -#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0) +#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0)) -#define PCIE20_PARF_CONFIG_BITS		0x50 -#define PHY_RX0_EQ(x)				((x) << 24) - -#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358 -#define SLV_ADDR_SPACE_SZ			0x10000000 - -#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0 - -#define DEVICE_TYPE_RC				0x4 - -#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3 -#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5 - -#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) +#define QCOM_PCIE_1_0_0_MAX_CLOCKS		4 +struct qcom_pcie_resources_1_0_0 { +	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS]; +	struct reset_control *core; +	struct regulator *vdda; +}; +#define QCOM_PCIE_2_1_0_MAX_CLOCKS		5 +#define QCOM_PCIE_2_1_0_MAX_RESETS		6 +#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3  struct qcom_pcie_resources_2_1_0 {  	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; -	struct reset_control *pci_reset; -	struct reset_control *axi_reset; -	struct reset_control *ahb_reset; -	struct reset_control *por_reset; -	struct reset_control *phy_reset; -	struct reset_control *ext_reset; +	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS]; +	int num_resets;  	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];  }; -struct qcom_pcie_resources_1_0_0 { -	struct clk *iface; -	struct clk *aux; -	struct clk *master_bus; -	struct clk *slave_bus; -	struct reset_control *core; -	struct regulator *vdda; -}; - -#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2 +#define QCOM_PCIE_2_3_2_MAX_CLOCKS		4 +#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2  struct qcom_pcie_resources_2_3_2 { -	struct clk *aux_clk; -	struct clk *master_clk; -	struct clk *slave_clk; -	struct clk *cfg_clk; +	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];  	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];  }; -#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4 +#define QCOM_PCIE_2_3_3_MAX_CLOCKS		5 +#define QCOM_PCIE_2_3_3_MAX_RESETS		7 +struct qcom_pcie_resources_2_3_3 { +	struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS]; +	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS]; +}; + +#define QCOM_PCIE_2_4_0_MAX_CLOCKS		4 +#define QCOM_PCIE_2_4_0_MAX_RESETS		12  struct qcom_pcie_resources_2_4_0 {  	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];  	int num_clks; -	struct 
reset_control *axi_m_reset; -	struct reset_control *axi_s_reset; -	struct reset_control *pipe_reset; -	struct reset_control *axi_m_vmid_reset; -	struct reset_control *axi_s_xpu_reset; -	struct reset_control *parf_reset; -	struct reset_control *phy_reset; -	struct reset_control *axi_m_sticky_reset; -	struct reset_control *pipe_sticky_reset; -	struct reset_control *pwr_reset; -	struct reset_control *ahb_reset; -	struct reset_control *phy_ahb_reset; -}; - -struct qcom_pcie_resources_2_3_3 { -	struct clk *iface; -	struct clk *axi_m_clk; -	struct clk *axi_s_clk; -	struct clk *ahb_clk; -	struct clk *aux_clk; -	struct reset_control *rst[7]; +	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS]; +	int num_resets;  }; -/* 6 clocks typically, 7 for sm8250 */ +#define QCOM_PCIE_2_7_0_MAX_CLOCKS		15 +#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2  struct qcom_pcie_resources_2_7_0 { -	struct clk_bulk_data clks[12]; +	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];  	int num_clks; -	struct regulator_bulk_data supplies[2]; -	struct reset_control *pci_reset; +	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES]; +	struct reset_control *rst;  }; +#define QCOM_PCIE_2_9_0_MAX_CLOCKS		5  struct qcom_pcie_resources_2_9_0 { -	struct clk_bulk_data clks[5]; +	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];  	struct reset_control *rst;  }; @@ -222,11 +237,14 @@ struct qcom_pcie {  	struct dw_pcie *pci;  	void __iomem *parf;			/* DT parf */  	void __iomem *elbi;			/* DT elbi */ +	void __iomem *mhi;  	union qcom_pcie_resources res;  	struct phy *phy;  	struct gpio_desc *reset;  	struct icc_path *icc_mem;  	const struct qcom_pcie_cfg *cfg; +	struct dentry *debugfs; +	bool suspended;  };  #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev) @@ -261,9 +279,9 @@ static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)  	u32 val;  	/* enable link training */ -	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); -	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; -	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +	val = readl(pcie->elbi + ELBI_SYS_CTRL); +	val |= ELBI_SYS_CTRL_LT_ENABLE; +	writel(val, pcie->elbi + ELBI_SYS_CTRL);  }  static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) @@ -271,6 +289,7 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; +	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");  	int ret;  	res->supplies[0].supply = "vdda"; @@ -297,28 +316,20 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)  	if (ret < 0)  		return ret; -	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); -	if (IS_ERR(res->pci_reset)) -		return PTR_ERR(res->pci_reset); - -	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); -	if (IS_ERR(res->axi_reset)) -		return PTR_ERR(res->axi_reset); - -	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); -	if (IS_ERR(res->ahb_reset)) -		return PTR_ERR(res->ahb_reset); +	res->resets[0].id = "pci"; +	res->resets[1].id = "axi"; +	res->resets[2].id = "ahb"; +	res->resets[3].id = "por"; +	res->resets[4].id = "phy"; +	res->resets[5].id = "ext"; -	res->por_reset = devm_reset_control_get_exclusive(dev, "por"); -	if (IS_ERR(res->por_reset)) -		return PTR_ERR(res->por_reset); - -	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); -	if (IS_ERR(res->ext_reset)) -		return PTR_ERR(res->ext_reset); +	/* ext is optional on APQ8016 
*/ +	res->num_resets = is_apq ? 5 : 6; +	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); +	if (ret < 0) +		return ret; -	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); -	return PTR_ERR_OR_ZERO(res->phy_reset); +	return 0;  }  static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) @@ -326,14 +337,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;  	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); -	reset_control_assert(res->pci_reset); -	reset_control_assert(res->axi_reset); -	reset_control_assert(res->ahb_reset); -	reset_control_assert(res->por_reset); -	reset_control_assert(res->ext_reset); -	reset_control_assert(res->phy_reset); +	reset_control_bulk_assert(res->num_resets, res->resets); -	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); +	writel(1, pcie->parf + PARF_PHY_CTRL);  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } @@ -346,12 +352,11 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	int ret;  	/* reset the PCIe interface as uboot can leave it undefined state */ -	reset_control_assert(res->pci_reset); -	reset_control_assert(res->axi_reset); -	reset_control_assert(res->ahb_reset); -	reset_control_assert(res->por_reset); -	reset_control_assert(res->ext_reset); -	reset_control_assert(res->phy_reset); +	ret = reset_control_bulk_assert(res->num_resets, res->resets); +	if (ret < 0) { +		dev_err(dev, "cannot assert resets\n"); +		return ret; +	}  	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);  	if (ret < 0) { @@ -359,58 +364,14 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  		return ret;  	} -	ret = reset_control_deassert(res->ahb_reset); -	if (ret) { -		dev_err(dev, "cannot deassert ahb reset\n"); -		goto err_deassert_ahb; -	} - -	ret = reset_control_deassert(res->ext_reset); -	if (ret) { -		dev_err(dev, "cannot deassert ext reset\n"); -		goto err_deassert_ext; -	} - -	ret = reset_control_deassert(res->phy_reset); -	if (ret) { -		dev_err(dev, "cannot deassert phy reset\n"); -		goto err_deassert_phy; -	} - -	ret = reset_control_deassert(res->pci_reset); -	if (ret) { -		dev_err(dev, "cannot deassert pci reset\n"); -		goto err_deassert_pci; -	} - -	ret = reset_control_deassert(res->por_reset); -	if (ret) { -		dev_err(dev, "cannot deassert por reset\n"); -		goto err_deassert_por; -	} - -	ret = reset_control_deassert(res->axi_reset); -	if (ret) { -		dev_err(dev, "cannot deassert axi reset\n"); -		goto err_deassert_axi; +	ret = reset_control_bulk_deassert(res->num_resets, res->resets); +	if (ret < 0) { +		dev_err(dev, "cannot deassert resets\n"); +		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); +		return ret;  	}  	return 0; - -err_deassert_axi: -	reset_control_assert(res->por_reset); -err_deassert_por: -	reset_control_assert(res->pci_reset); -err_deassert_pci: -	reset_control_assert(res->phy_reset); -err_deassert_phy: -	reset_control_assert(res->ext_reset); -err_deassert_ext: -	reset_control_assert(res->ahb_reset); -err_deassert_ahb: -	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - -	return ret;  }  static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) @@ -423,9 +384,9 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)  	int ret;  	/* enable PCIe clocks and resets */ -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	
val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL);  	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);  	if (ret) @@ -436,37 +397,37 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)  		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |  			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |  			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), -		       pcie->parf + PCIE20_PARF_PCS_DEEMPH); +		       pcie->parf + PARF_PCS_DEEMPH);  		writel(PCS_SWING_TX_SWING_FULL(120) |  			       PCS_SWING_TX_SWING_LOW(120), -		       pcie->parf + PCIE20_PARF_PCS_SWING); -		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); +		       pcie->parf + PARF_PCS_SWING); +		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);  	}  	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {  		/* set TX termination offset */ -		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); +		val = readl(pcie->parf + PARF_PHY_CTRL);  		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;  		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); -		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +		writel(val, pcie->parf + PARF_PHY_CTRL);  	}  	/* enable external reference clock */ -	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); +	val = readl(pcie->parf + PARF_PHY_REFCLK);  	/* USE_PAD is required only for ipq806x */  	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))  		val &= ~PHY_REFCLK_USE_PAD;  	val |= PHY_REFCLK_SSP_EN; -	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); +	writel(val, pcie->parf + PARF_PHY_REFCLK);  	/* wait for clock acquisition */  	usleep_range(1000, 1500);  	/* Set the Max TLP size to 2K, instead of using default of 4K */  	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, -	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); +	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);  	writel(CFG_BRIDGE_SB_INIT, -	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); +	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);  	return 0;  } @@ -476,26 +437,20 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; +	int ret;  	res->vdda = devm_regulator_get(dev, "vdda");  	if (IS_ERR(res->vdda))  		return PTR_ERR(res->vdda); -	res->iface = devm_clk_get(dev, "iface"); -	if (IS_ERR(res->iface)) -		return PTR_ERR(res->iface); - -	res->aux = devm_clk_get(dev, "aux"); -	if (IS_ERR(res->aux)) -		return PTR_ERR(res->aux); - -	res->master_bus = devm_clk_get(dev, "master_bus"); -	if (IS_ERR(res->master_bus)) -		return PTR_ERR(res->master_bus); +	res->clks[0].id = "iface"; +	res->clks[1].id = "aux"; +	res->clks[2].id = "master_bus"; +	res->clks[3].id = "slave_bus"; -	res->slave_bus = devm_clk_get(dev, "slave_bus"); -	if (IS_ERR(res->slave_bus)) -		return PTR_ERR(res->slave_bus); +	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); +	if (ret < 0) +		return ret;  	res->core = devm_reset_control_get_exclusive(dev, "core");  	return PTR_ERR_OR_ZERO(res->core); @@ -506,10 +461,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;  	reset_control_assert(res->core); -	clk_disable_unprepare(res->slave_bus); -	clk_disable_unprepare(res->master_bus); -	clk_disable_unprepare(res->iface); -	clk_disable_unprepare(res->aux); +	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);  	regulator_disable(res->vdda);  } @@ -526,46 +478,23 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)  		
return ret;  	} -	ret = clk_prepare_enable(res->aux); -	if (ret) { -		dev_err(dev, "cannot prepare/enable aux clock\n"); -		goto err_res; -	} - -	ret = clk_prepare_enable(res->iface); -	if (ret) { -		dev_err(dev, "cannot prepare/enable iface clock\n"); -		goto err_aux; -	} - -	ret = clk_prepare_enable(res->master_bus); -	if (ret) { -		dev_err(dev, "cannot prepare/enable master_bus clock\n"); -		goto err_iface; -	} - -	ret = clk_prepare_enable(res->slave_bus); +	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);  	if (ret) { -		dev_err(dev, "cannot prepare/enable slave_bus clock\n"); -		goto err_master; +		dev_err(dev, "cannot prepare/enable clocks\n"); +		goto err_assert_reset;  	}  	ret = regulator_enable(res->vdda);  	if (ret) {  		dev_err(dev, "cannot enable vdda regulator\n"); -		goto err_slave; +		goto err_disable_clks;  	}  	return 0; -err_slave: -	clk_disable_unprepare(res->slave_bus); -err_master: -	clk_disable_unprepare(res->master_bus); -err_iface: -	clk_disable_unprepare(res->iface); -err_aux: -	clk_disable_unprepare(res->aux); -err_res: + +err_disable_clks: +	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +err_assert_reset:  	reset_control_assert(res->core);  	return ret; @@ -574,13 +503,13 @@ err_res:  static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)  {  	/* change DBI base address */ -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);  	if (IS_ENABLED(CONFIG_PCI_MSI)) { -		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); +		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); -		val |= BIT(31); -		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); +		val |= EN; +		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);  	}  	return 0; @@ -591,9 +520,9 @@ static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)  	u32 val;  	/* enable link training */ -	val = readl(pcie->parf + PCIE20_PARF_LTSSM); -	val |= BIT(8); -	writel(val, pcie->parf + PCIE20_PARF_LTSSM); +	val = readl(pcie->parf + PARF_LTSSM); +	val |= LTSSM_EN; +	writel(val, pcie->parf + PARF_LTSSM);  }  static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) @@ -610,21 +539,14 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)  	if (ret)  		return ret; -	res->aux_clk = devm_clk_get(dev, "aux"); -	if (IS_ERR(res->aux_clk)) -		return PTR_ERR(res->aux_clk); - -	res->cfg_clk = devm_clk_get(dev, "cfg"); -	if (IS_ERR(res->cfg_clk)) -		return PTR_ERR(res->cfg_clk); - -	res->master_clk = devm_clk_get(dev, "bus_master"); -	if (IS_ERR(res->master_clk)) -		return PTR_ERR(res->master_clk); +	res->clks[0].id = "aux"; +	res->clks[1].id = "cfg"; +	res->clks[2].id = "bus_master"; +	res->clks[3].id = "bus_slave"; -	res->slave_clk = devm_clk_get(dev, "bus_slave"); -	if (IS_ERR(res->slave_clk)) -		return PTR_ERR(res->slave_clk); +	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); +	if (ret < 0) +		return ret;  	return 0;  } @@ -633,11 +555,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; -	clk_disable_unprepare(res->slave_clk); -	clk_disable_unprepare(res->master_clk); -	clk_disable_unprepare(res->cfg_clk); -	clk_disable_unprepare(res->aux_clk); - +	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } @@ -654,43 +572,14 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)  		return ret;  	} -	ret = 
clk_prepare_enable(res->aux_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable aux clock\n"); -		goto err_aux_clk; -	} - -	ret = clk_prepare_enable(res->cfg_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable cfg clock\n"); -		goto err_cfg_clk; -	} - -	ret = clk_prepare_enable(res->master_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable master clock\n"); -		goto err_master_clk; -	} - -	ret = clk_prepare_enable(res->slave_clk); +	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);  	if (ret) { -		dev_err(dev, "cannot prepare/enable slave clock\n"); -		goto err_slave_clk; +		dev_err(dev, "cannot prepare/enable clocks\n"); +		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); +		return ret;  	}  	return 0; - -err_slave_clk: -	clk_disable_unprepare(res->master_clk); -err_master_clk: -	clk_disable_unprepare(res->cfg_clk); -err_cfg_clk: -	clk_disable_unprepare(res->aux_clk); - -err_aux_clk: -	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - -	return ret;  }  static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) @@ -698,25 +587,25 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)  	u32 val;  	/* enable PCIe clocks and resets */ -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL);  	/* change DBI base address */ -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);  	/* MAC PHY_POWERDOWN MUX DISABLE  */ -	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); -	val &= ~BIT(29); -	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); +	val = readl(pcie->parf + PARF_SYS_CTRL); +	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; +	writel(val, pcie->parf + PARF_SYS_CTRL); -	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); -	val |= BIT(4); -	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); +	val |= BYPASS; +	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); -	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); -	val |= BIT(31); -	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); +	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); +	val |= EN; +	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);  	return 0;  } @@ -741,65 +630,24 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)  	if (ret < 0)  		return ret; -	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); -	if (IS_ERR(res->axi_m_reset)) -		return PTR_ERR(res->axi_m_reset); - -	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); -	if (IS_ERR(res->axi_s_reset)) -		return PTR_ERR(res->axi_s_reset); - -	if (is_ipq) { -		/* -		 * These resources relates to the PHY or are secure clocks, but -		 * are controlled here for IPQ4019 -		 */ -		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); -		if (IS_ERR(res->pipe_reset)) -			return PTR_ERR(res->pipe_reset); - -		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, -									 "axi_m_vmid"); -		if (IS_ERR(res->axi_m_vmid_reset)) -			return PTR_ERR(res->axi_m_vmid_reset); - -		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, -									"axi_s_xpu"); -		if (IS_ERR(res->axi_s_xpu_reset)) -			return PTR_ERR(res->axi_s_xpu_reset); - -		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); -		if 
(IS_ERR(res->parf_reset)) -			return PTR_ERR(res->parf_reset); - -		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); -		if (IS_ERR(res->phy_reset)) -			return PTR_ERR(res->phy_reset); -	} - -	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, -								   "axi_m_sticky"); -	if (IS_ERR(res->axi_m_sticky_reset)) -		return PTR_ERR(res->axi_m_sticky_reset); - -	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, -								  "pipe_sticky"); -	if (IS_ERR(res->pipe_sticky_reset)) -		return PTR_ERR(res->pipe_sticky_reset); - -	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); -	if (IS_ERR(res->pwr_reset)) -		return PTR_ERR(res->pwr_reset); - -	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); -	if (IS_ERR(res->ahb_reset)) -		return PTR_ERR(res->ahb_reset); - -	if (is_ipq) { -		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); -		if (IS_ERR(res->phy_ahb_reset)) -			return PTR_ERR(res->phy_ahb_reset); -	} +	res->resets[0].id = "axi_m"; +	res->resets[1].id = "axi_s"; +	res->resets[2].id = "axi_m_sticky"; +	res->resets[3].id = "pipe_sticky"; +	res->resets[4].id = "pwr"; +	res->resets[5].id = "ahb"; +	res->resets[6].id = "pipe"; +	res->resets[7].id = "axi_m_vmid"; +	res->resets[8].id = "axi_s_xpu"; +	res->resets[9].id = "parf"; +	res->resets[10].id = "phy"; +	res->resets[11].id = "phy_ahb"; + +	res->num_resets = is_ipq ? 12 : 6; + +	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); +	if (ret < 0) +		return ret;  	return 0;  } @@ -808,15 +656,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; -	reset_control_assert(res->axi_m_reset); -	reset_control_assert(res->axi_s_reset); -	reset_control_assert(res->pipe_reset); -	reset_control_assert(res->pipe_sticky_reset); -	reset_control_assert(res->phy_reset); -	reset_control_assert(res->phy_ahb_reset); -	reset_control_assert(res->axi_m_sticky_reset); -	reset_control_assert(res->pwr_reset); -	reset_control_assert(res->ahb_reset); +	reset_control_bulk_assert(res->num_resets, res->resets);  	clk_bulk_disable_unprepare(res->num_clks, res->clks);  } @@ -827,149 +667,29 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)  	struct device *dev = pci->dev;  	int ret; -	ret = reset_control_assert(res->axi_m_reset); -	if (ret) { -		dev_err(dev, "cannot assert axi master reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->axi_s_reset); -	if (ret) { -		dev_err(dev, "cannot assert axi slave reset\n"); -		return ret; -	} - -	usleep_range(10000, 12000); - -	ret = reset_control_assert(res->pipe_reset); -	if (ret) { -		dev_err(dev, "cannot assert pipe reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->pipe_sticky_reset); -	if (ret) { -		dev_err(dev, "cannot assert pipe sticky reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->phy_reset); -	if (ret) { -		dev_err(dev, "cannot assert phy reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->phy_ahb_reset); -	if (ret) { -		dev_err(dev, "cannot assert phy ahb reset\n"); +	ret = reset_control_bulk_assert(res->num_resets, res->resets); +	if (ret < 0) { +		dev_err(dev, "cannot assert resets\n");  		return ret;  	}  	usleep_range(10000, 12000); -	ret = reset_control_assert(res->axi_m_sticky_reset); -	if (ret) { -		dev_err(dev, "cannot assert axi master sticky reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->pwr_reset); -	if (ret) { -		dev_err(dev, "cannot assert power 
reset\n"); -		return ret; -	} - -	ret = reset_control_assert(res->ahb_reset); -	if (ret) { -		dev_err(dev, "cannot assert ahb reset\n"); +	ret = reset_control_bulk_deassert(res->num_resets, res->resets); +	if (ret < 0) { +		dev_err(dev, "cannot deassert resets\n");  		return ret;  	}  	usleep_range(10000, 12000); -	ret = reset_control_deassert(res->phy_ahb_reset); +	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret) { -		dev_err(dev, "cannot deassert phy ahb reset\n"); +		reset_control_bulk_assert(res->num_resets, res->resets);  		return ret;  	} -	ret = reset_control_deassert(res->phy_reset); -	if (ret) { -		dev_err(dev, "cannot deassert phy reset\n"); -		goto err_rst_phy; -	} - -	ret = reset_control_deassert(res->pipe_reset); -	if (ret) { -		dev_err(dev, "cannot deassert pipe reset\n"); -		goto err_rst_pipe; -	} - -	ret = reset_control_deassert(res->pipe_sticky_reset); -	if (ret) { -		dev_err(dev, "cannot deassert pipe sticky reset\n"); -		goto err_rst_pipe_sticky; -	} - -	usleep_range(10000, 12000); - -	ret = reset_control_deassert(res->axi_m_reset); -	if (ret) { -		dev_err(dev, "cannot deassert axi master reset\n"); -		goto err_rst_axi_m; -	} - -	ret = reset_control_deassert(res->axi_m_sticky_reset); -	if (ret) { -		dev_err(dev, "cannot deassert axi master sticky reset\n"); -		goto err_rst_axi_m_sticky; -	} - -	ret = reset_control_deassert(res->axi_s_reset); -	if (ret) { -		dev_err(dev, "cannot deassert axi slave reset\n"); -		goto err_rst_axi_s; -	} - -	ret = reset_control_deassert(res->pwr_reset); -	if (ret) { -		dev_err(dev, "cannot deassert power reset\n"); -		goto err_rst_pwr; -	} - -	ret = reset_control_deassert(res->ahb_reset); -	if (ret) { -		dev_err(dev, "cannot deassert ahb reset\n"); -		goto err_rst_ahb; -	} - -	usleep_range(10000, 12000); - -	ret = clk_bulk_prepare_enable(res->num_clks, res->clks); -	if (ret) -		goto err_clks; -  	return 0; - -err_clks: -	reset_control_assert(res->ahb_reset); -err_rst_ahb: -	reset_control_assert(res->pwr_reset); -err_rst_pwr: -	reset_control_assert(res->axi_s_reset); -err_rst_axi_s: -	reset_control_assert(res->axi_m_sticky_reset); -err_rst_axi_m_sticky: -	reset_control_assert(res->axi_m_reset); -err_rst_axi_m: -	reset_control_assert(res->pipe_sticky_reset); -err_rst_pipe_sticky: -	reset_control_assert(res->pipe_reset); -err_rst_pipe: -	reset_control_assert(res->phy_reset); -err_rst_phy: -	reset_control_assert(res->phy_ahb_reset); -	return ret;  }  static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) @@ -977,25 +697,25 @@ static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)  	u32 val;  	/* enable PCIe clocks and resets */ -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL);  	/* change DBI base address */ -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);  	/* MAC PHY_POWERDOWN MUX DISABLE  */ -	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); -	val &= ~BIT(29); -	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); +	val = readl(pcie->parf + PARF_SYS_CTRL); +	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; +	writel(val, pcie->parf + PARF_SYS_CTRL); -	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); -	val |= BIT(4); -	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); +	val |= BYPASS; +	writel(val, pcie->parf + 
PARF_MHI_CLOCK_RESET_CTRL); -	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); -	val |= BIT(31); -	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); +	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); +	val |= EN; +	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);  	return 0;  } @@ -1005,36 +725,29 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	int i; -	const char *rst_names[] = { "axi_m", "axi_s", "pipe", -				    "axi_m_sticky", "sticky", -				    "ahb", "sleep", }; - -	res->iface = devm_clk_get(dev, "iface"); -	if (IS_ERR(res->iface)) -		return PTR_ERR(res->iface); - -	res->axi_m_clk = devm_clk_get(dev, "axi_m"); -	if (IS_ERR(res->axi_m_clk)) -		return PTR_ERR(res->axi_m_clk); - -	res->axi_s_clk = devm_clk_get(dev, "axi_s"); -	if (IS_ERR(res->axi_s_clk)) -		return PTR_ERR(res->axi_s_clk); - -	res->ahb_clk = devm_clk_get(dev, "ahb"); -	if (IS_ERR(res->ahb_clk)) -		return PTR_ERR(res->ahb_clk); - -	res->aux_clk = devm_clk_get(dev, "aux"); -	if (IS_ERR(res->aux_clk)) -		return PTR_ERR(res->aux_clk); - -	for (i = 0; i < ARRAY_SIZE(rst_names); i++) { -		res->rst[i] = devm_reset_control_get(dev, rst_names[i]); -		if (IS_ERR(res->rst[i])) -			return PTR_ERR(res->rst[i]); -	} +	int ret; + +	res->clks[0].id = "iface"; +	res->clks[1].id = "axi_m"; +	res->clks[2].id = "axi_s"; +	res->clks[3].id = "ahb"; +	res->clks[4].id = "aux"; + +	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); +	if (ret < 0) +		return ret; + +	res->rst[0].id = "axi_m"; +	res->rst[1].id = "axi_s"; +	res->rst[2].id = "pipe"; +	res->rst[3].id = "axi_m_sticky"; +	res->rst[4].id = "sticky"; +	res->rst[5].id = "ahb"; +	res->rst[6].id = "sleep"; + +	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); +	if (ret < 0) +		return ret;  	return 0;  } @@ -1043,11 +756,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; -	clk_disable_unprepare(res->iface); -	clk_disable_unprepare(res->axi_m_clk); -	clk_disable_unprepare(res->axi_s_clk); -	clk_disable_unprepare(res->ahb_clk); -	clk_disable_unprepare(res->aux_clk); +	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);  }  static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) @@ -1055,25 +764,20 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	int i, ret; +	int ret; -	for (i = 0; i < ARRAY_SIZE(res->rst); i++) { -		ret = reset_control_assert(res->rst[i]); -		if (ret) { -			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); -			return ret; -		} +	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); +	if (ret < 0) { +		dev_err(dev, "cannot assert resets\n"); +		return ret;  	}  	usleep_range(2000, 2500); -	for (i = 0; i < ARRAY_SIZE(res->rst); i++) { -		ret = reset_control_deassert(res->rst[i]); -		if (ret) { -			dev_err(dev, "reset #%d deassert failed (%d)\n", i, -				ret); -			return ret; -		} +	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); +	if (ret < 0) { +		dev_err(dev, "cannot deassert resets\n"); +		return ret;  	}  	/* @@ -1082,53 +786,20 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	 */  	usleep_range(2000, 2500); -	ret = clk_prepare_enable(res->iface); -	if (ret) { -		
dev_err(dev, "cannot prepare/enable core clock\n"); -		goto err_clk_iface; -	} - -	ret = clk_prepare_enable(res->axi_m_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable core clock\n"); -		goto err_clk_axi_m; -	} - -	ret = clk_prepare_enable(res->axi_s_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable axi slave clock\n"); -		goto err_clk_axi_s; -	} - -	ret = clk_prepare_enable(res->ahb_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable ahb clock\n"); -		goto err_clk_ahb; -	} - -	ret = clk_prepare_enable(res->aux_clk); +	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);  	if (ret) { -		dev_err(dev, "cannot prepare/enable aux clock\n"); -		goto err_clk_aux; +		dev_err(dev, "cannot prepare/enable clocks\n"); +		goto err_assert_resets;  	}  	return 0; -err_clk_aux: -	clk_disable_unprepare(res->ahb_clk); -err_clk_ahb: -	clk_disable_unprepare(res->axi_s_clk); -err_clk_axi_s: -	clk_disable_unprepare(res->axi_m_clk); -err_clk_axi_m: -	clk_disable_unprepare(res->iface); -err_clk_iface: +err_assert_resets:  	/*  	 * Not checking for failure, will anyway return  	 * the original failure in 'ret'.  	 */ -	for (i = 0; i < ARRAY_SIZE(res->rst); i++) -		reset_control_assert(res->rst[i]); +	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);  	return ret;  } @@ -1140,22 +811,22 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)  	u32 val;  	writel(SLV_ADDR_SPACE_SZ, -		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); +		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3); -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL); -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);  	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS  		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |  		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, -		pcie->parf + PCIE20_PARF_SYS_CTRL); -	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); +		pcie->parf + PARF_SYS_CTRL); +	writel(0, pcie->parf + PARF_Q2A_FLUSH);  	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); -	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); +	writel(DBI_RO_WR_EN, pci->dbi_base + MISC_CONTROL_1_REG);  	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);  	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); @@ -1177,9 +848,9 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)  	unsigned int idx;  	int ret; -	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); -	if (IS_ERR(res->pci_reset)) -		return PTR_ERR(res->pci_reset); +	res->rst = devm_reset_control_array_get_exclusive(dev); +	if (IS_ERR(res->rst)) +		return PTR_ERR(res->rst);  	res->supplies[0].supply = "vdda";  	res->supplies[1].supply = "vddpe-3v3"; @@ -1205,9 +876,12 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)  	res->clks[idx++].id = "ddrss_sf_tbu";  	res->clks[idx++].id = "aggre0";  	res->clks[idx++].id = "aggre1"; +	res->clks[idx++].id = "noc_aggr";  	res->clks[idx++].id = "noc_aggr_4";  	res->clks[idx++].id = "noc_aggr_south_sf";  	res->clks[idx++].id = "cnoc_qx"; +	res->clks[idx++].id = "sleep"; +	res->clks[idx++].id = "cnoc_sf_axi";  	num_opt_clks = idx - num_clks;  	res->num_clks = idx; @@ -1237,17 +911,17 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)  	if (ret < 0)  		goto err_disable_regulators; -	ret = 
reset_control_assert(res->pci_reset); -	if (ret < 0) { -		dev_err(dev, "cannot assert pci reset\n"); +	ret = reset_control_assert(res->rst); +	if (ret) { +		dev_err(dev, "reset assert failed (%d)\n", ret);  		goto err_disable_clocks;  	}  	usleep_range(1000, 1500); -	ret = reset_control_deassert(res->pci_reset); -	if (ret < 0) { -		dev_err(dev, "cannot deassert pci reset\n"); +	ret = reset_control_deassert(res->rst); +	if (ret) { +		dev_err(dev, "reset deassert failed (%d)\n", ret);  		goto err_disable_clocks;  	} @@ -1255,35 +929,33 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)  	usleep_range(1000, 1500);  	/* configure PCIe to RC mode */ -	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); +	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);  	/* enable PCIe clocks and resets */ -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL);  	/* change DBI base address */ -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);  	/* MAC PHY_POWERDOWN MUX DISABLE  */ -	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); -	val &= ~BIT(29); -	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); +	val = readl(pcie->parf + PARF_SYS_CTRL); +	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; +	writel(val, pcie->parf + PARF_SYS_CTRL); -	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); -	val |= BIT(4); -	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); +	val |= BYPASS; +	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);  	/* Enable L1 and L1SS */ -	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL); +	val = readl(pcie->parf + PARF_PM_CTRL);  	val &= ~REQ_NOT_ENTR_L1; -	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL); +	writel(val, pcie->parf + PARF_PM_CTRL); -	if (IS_ENABLED(CONFIG_PCI_MSI)) { -		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); -		val |= BIT(31); -		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); -	} +	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); +	val |= EN; +	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);  	return 0;  err_disable_clocks: @@ -1303,6 +975,76 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } +static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) +{ +	/* iommu map structure */ +	struct { +		u32 bdf; +		u32 phandle; +		u32 smmu_sid; +		u32 smmu_sid_len; +	} *map; +	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; +	struct device *dev = pcie->pci->dev; +	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; +	int i, nr_map, size = 0; +	u32 smmu_sid_base; + +	of_get_property(dev->of_node, "iommu-map", &size); +	if (!size) +		return 0; + +	map = kzalloc(size, GFP_KERNEL); +	if (!map) +		return -ENOMEM; + +	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, +				   size / sizeof(u32)); + +	nr_map = size / (sizeof(*map)); + +	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); + +	/* Registers need to be zero out first */ +	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); + +	/* Extract the SMMU SID base from the first entry of iommu-map */ +	smmu_sid_base = map[0].smmu_sid; + +	/* Look for an available entry to hold the mapping */ +	for (i = 0; i < nr_map; i++) { +		__be16 bdf_be = 
cpu_to_be16(map[i].bdf); +		u32 val; +		u8 hash; + +		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); + +		val = readl(bdf_to_sid_base + hash * sizeof(u32)); + +		/* If the register is already populated, look for next available entry */ +		while (val) { +			u8 current_hash = hash++; +			u8 next_mask = 0xff; + +			/* If NEXT field is NULL then update it with next hash */ +			if (!(val & next_mask)) { +				val |= (u32)hash; +				writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); +			} + +			val = readl(bdf_to_sid_base + hash * sizeof(u32)); +		} + +		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */ +		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; +		writel(val, bdf_to_sid_base + hash * sizeof(u32)); +	} + +	kfree(map); + +	return 0; +} +  static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; @@ -1371,17 +1113,17 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)  	int i;  	writel(SLV_ADDR_SPACE_SZ, -		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); +		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); -	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -	val &= ~BIT(0); -	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	val = readl(pcie->parf + PARF_PHY_CTRL); +	val &= ~PHY_TEST_PWR_DOWN; +	writel(val, pcie->parf + PARF_PHY_CTRL); -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); +	writel(0, pcie->parf + PARF_DBI_BASE_ADDR); -	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); +	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);  	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, -		pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);  	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |  		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,  		pci->dbi_base + GEN3_RELATED_OFF); @@ -1389,9 +1131,9 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)  	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |  		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |  		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, -		pcie->parf + PCIE20_PARF_SYS_CTRL); +		pcie->parf + PARF_SYS_CTRL); -	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); +	writel(0, pcie->parf + PARF_Q2A_FLUSH);  	dw_pcie_dbi_ro_wr_en(pci);  	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); @@ -1404,7 +1146,7 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)  			PCI_EXP_DEVCTL2);  	for (i = 0; i < 256; i++) -		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i)); +		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));  	return 0;  } @@ -1417,77 +1159,6 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)  	return !!(val & PCI_EXP_LNKSTA_DLLLA);  } -static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) -{ -	/* iommu map structure */ -	struct { -		u32 bdf; -		u32 phandle; -		u32 smmu_sid; -		u32 smmu_sid_len; -	} *map; -	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N; -	struct device *dev = pcie->pci->dev; -	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; -	int i, nr_map, size = 0; -	u32 smmu_sid_base; - -	of_get_property(dev->of_node, "iommu-map", &size); -	if (!size) -		return 0; - -	map = kzalloc(size, GFP_KERNEL); -	if (!map) -		return -ENOMEM; - -	of_property_read_u32_array(dev->of_node, -		"iommu-map", (u32 *)map, size / sizeof(u32)); - -	nr_map = size / (sizeof(*map)); - -	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); - -	/* Registers need to be zero out first */ -	
memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); - -	/* Extract the SMMU SID base from the first entry of iommu-map */ -	smmu_sid_base = map[0].smmu_sid; - -	/* Look for an available entry to hold the mapping */ -	for (i = 0; i < nr_map; i++) { -		__be16 bdf_be = cpu_to_be16(map[i].bdf); -		u32 val; -		u8 hash; - -		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), -			0); - -		val = readl(bdf_to_sid_base + hash * sizeof(u32)); - -		/* If the register is already populated, look for next available entry */ -		while (val) { -			u8 current_hash = hash++; -			u8 next_mask = 0xff; - -			/* If NEXT field is NULL then update it with next hash */ -			if (!(val & next_mask)) { -				val |= (u32)hash; -				writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); -			} - -			val = readl(bdf_to_sid_base + hash * sizeof(u32)); -		} - -		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */ -		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; -		writel(val, bdf_to_sid_base + hash * sizeof(u32)); -	} - -	kfree(map); - -	return 0; -} -  static int qcom_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -1608,7 +1279,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {  	.init = qcom_pcie_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -	.config_sid = qcom_pcie_config_sid_sm8250, +	.config_sid = qcom_pcie_config_sid_1_9_0,  };  /* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */ @@ -1725,13 +1396,51 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)  	}  } +static int qcom_pcie_link_transition_count(struct seq_file *s, void *data) +{ +	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); + +	seq_printf(s, "L0s transition count: %u\n", +		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); + +	seq_printf(s, "L1 transition count: %u\n", +		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); + +	seq_printf(s, "L1.1 transition count: %u\n", +		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); + +	seq_printf(s, "L1.2 transition count: %u\n", +		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); + +	seq_printf(s, "L2 transition count: %u\n", +		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); + +	return 0; +} + +static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie) +{ +	struct dw_pcie *pci = pcie->pci; +	struct device *dev = pci->dev; +	char *name; + +	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); +	if (!name) +		return; + +	pcie->debugfs = debugfs_create_dir(name, NULL); +	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, +				    qcom_pcie_link_transition_count); +} +  static int qcom_pcie_probe(struct platform_device *pdev)  { +	const struct qcom_pcie_cfg *pcie_cfg;  	struct device *dev = &pdev->dev; +	struct qcom_pcie *pcie;  	struct dw_pcie_rp *pp; +	struct resource *res;  	struct dw_pcie *pci; -	struct qcom_pcie *pcie; -	const struct qcom_pcie_cfg *pcie_cfg;  	int ret;  	pcie_cfg = of_device_get_match_data(dev); @@ -1779,6 +1488,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)  		goto err_pm_runtime_put;  	} +	/* MHI region is optional */ +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi"); +	if (res) { +		pcie->mhi = devm_ioremap_resource(dev, res); +		if (IS_ERR(pcie->mhi)) { +			ret = PTR_ERR(pcie->mhi); +			goto err_pm_runtime_put; +		} +	} +  	pcie->phy = devm_phy_optional_get(dev, "pciephy");  	if 
(IS_ERR(pcie->phy)) {  		ret = PTR_ERR(pcie->phy); @@ -1809,6 +1528,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)  	qcom_pcie_icc_update(pcie); +	if (pcie->mhi) +		qcom_pcie_init_debugfs(pcie); +  	return 0;  err_phy_exit: @@ -1820,6 +1542,62 @@ err_pm_runtime_put:  	return ret;  } +static int qcom_pcie_suspend_noirq(struct device *dev) +{ +	struct qcom_pcie *pcie = dev_get_drvdata(dev); +	int ret; + +	/* +	 * Set minimum bandwidth required to keep data path functional during +	 * suspend. +	 */ +	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); +	if (ret) { +		dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret); +		return ret; +	} + +	/* +	 * Turn OFF the resources only for controllers without active PCIe +	 * devices. For controllers with active devices, the resources are kept +	 * ON and the link is expected to be in L0/L1 (sub)states. +	 * +	 * Turning OFF the resources for controllers with active PCIe devices +	 * will trigger access violation during the end of the suspend cycle, +	 * as kernel tries to access the PCIe devices config space for masking +	 * MSIs. +	 * +	 * Also, it is not desirable to put the link into L2/L3 state as that +	 * implies VDD supply will be removed and the devices may go into +	 * powerdown state. This will affect the lifetime of the storage devices +	 * like NVMe. +	 */ +	if (!dw_pcie_link_up(pcie->pci)) { +		qcom_pcie_host_deinit(&pcie->pci->pp); +		pcie->suspended = true; +	} + +	return 0; +} + +static int qcom_pcie_resume_noirq(struct device *dev) +{ +	struct qcom_pcie *pcie = dev_get_drvdata(dev); +	int ret; + +	if (pcie->suspended) { +		ret = qcom_pcie_host_init(&pcie->pci->pp); +		if (ret) +			return ret; + +		pcie->suspended = false; +	} + +	qcom_pcie_icc_update(pcie); + +	return 0; +} +  static const struct of_device_id qcom_pcie_match[] = {  	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },  	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, @@ -1836,11 +1614,13 @@ static const struct of_device_id qcom_pcie_match[] = {  	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, +	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, +	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },  	{ }  }; @@ -1856,12 +1636,18 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);  DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);  DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); +static const struct dev_pm_ops qcom_pcie_pm_ops = { +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq) +}; +  static struct platform_driver qcom_pcie_driver = {  	.probe = qcom_pcie_probe,  	.driver = {  		.name = "qcom-pcie",  		.suppress_bind_attrs = true,  		.of_match_table = qcom_pcie_match, +		.pm = &qcom_pcie_pm_ops, +		.probe_type = PROBE_PREFER_ASYNCHRONOUS,  	},  };  builtin_platform_driver(qcom_pcie_driver);  |
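
The conversions above (including the 2_4_0 init near the start of this hunk) replace per-resource error unwinding with the kernel's bulk clock and reset helpers. A minimal sketch of the resulting pattern, assuming a resource struct shaped like the 2.3.3 variant defined earlier in the file (five named clocks, seven named resets); the struct and function names below are illustrative, not the driver's own:

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/reset.h>

struct example_pcie_resources {
	struct clk_bulk_data clks[5];
	struct reset_control_bulk_data rst[7];
};

static int example_get_and_enable(struct device *dev,
				  struct example_pcie_resources *res)
{
	static const char * const clk_ids[] = {
		"iface", "axi_m", "axi_s", "ahb", "aux",
	};
	static const char * const rst_ids[] = {
		"axi_m", "axi_s", "pipe", "axi_m_sticky", "sticky", "ahb", "sleep",
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(res->clks); i++)
		res->clks[i].id = clk_ids[i];
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		res->rst[i].id = rst_ids[i];

	/* One lookup call per resource type instead of one per resource */
	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst),
						    res->rst);
	if (ret)
		return ret;

	/* Same assert -> delay -> deassert -> delay -> clock-enable sequence
	 * as the open-coded version, with a single-call error path. */
	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret)
		return ret;

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret)
		return ret;

	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}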
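
qcom_pcie_config_sid_1_9_0() above hashes each iommu-map entry into the 256-slot PARF_BDF_TO_SID_TABLE_N region, chaining collisions through the NEXT byte. A sketch of how a single entry is encoded, assuming the CRC8 polynomial defined earlier in the file is 0x07 and using an illustrative helper name:

#include <asm/byteorder.h>
#include <linux/crc8.h>
#include <linux/types.h>

#define EXAMPLE_CRC8_POLYNOMIAL	0x07	/* assumed QCOM_PCIE_CRC8_POLYNOMIAL */

/* Returns the register value for one BDF and reports the slot (the CRC8 of
 * the big-endian BDF) it would initially be written to. */
static u32 example_bdf_to_sid_entry(u16 bdf, u32 smmu_sid, u32 smmu_sid_base,
				    u8 *slot)
{
	u8 table[CRC8_TABLE_SIZE];
	__be16 bdf_be = cpu_to_be16(bdf);

	crc8_populate_msb(table, EXAMPLE_CRC8_POLYNOMIAL);
	*slot = crc8(table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

	/* Layout used above: BDF [31:16] | SID offset [15:8] | NEXT [7:0].
	 * NEXT stays 0 unless a later BDF collides with this slot. */
	return (u32)bdf << 16 | (smmu_sid - smmu_sid_base) << 8;
}

For example, BDF 0x0100 (bus 1, devfn 0) with SMMU SID 0x1c01 against a base of 0x1c00 encodes as 0x01000100.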
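
When the optional "mhi" register region is declared for the controller, probe() now maps it and creates a debugfs directory named after the DT node (via "%pOFP", e.g. pcie@1c00000 on a hypothetical board) containing a single link_transition_count file; the directory is only created when that region is present. Reading the file prints the PARF debug counters in the format emitted by qcom_pcie_link_transition_count() above:

	L0s transition count: <n>
	L1 transition count: <n>
	L1.1 transition count: <n>
	L1.2 transition count: <n>
	L2 transition count: <n>

The values come straight from readl_relaxed() of the PARF_DEBUG_CNT_* registers in the MHI region.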