Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                 |  160
-rw-r--r--  drivers/pci/controller/dwc/Makefile                |   15
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c            |  237
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c            |  447
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c              |  161
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c          |  249
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c     |  130
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c        |   72
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c             |  333
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c               |  126
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c         |   42
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c          |  145
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c    |  500
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  |  513
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c  |   74
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c       |  354
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h       |  166
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c      |  279
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c            |  309
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c             |  240
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c            |   87
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c         |  463
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c          |  460
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c            |  723
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c          |  721
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c             |  601
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c        |   97
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194-acpi.c    |  108
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c         | 1009
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c      |  349
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c         |  118
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c         |  329
32 files changed, 6638 insertions(+), 2979 deletions(-)
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 625a031b2193..62ce3abf0f19 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -8,25 +8,23 @@ config PCIE_DW
config PCIE_DW_HOST
bool
- depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
config PCIE_DW_EP
bool
- depends on PCI_ENDPOINT
select PCIE_DW
config PCI_DRA7XX
- bool
+ tristate
config PCI_DRA7XX_HOST
- bool "TI DRA7xx PCIe controller Host Mode"
+ tristate "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_DRA7XX
- default y
+ default y if SOC_DRA7XX
help
Enables support for the PCIe controller in the DRA7xx SoC to work in
host mode. There are two instances of PCIe controller in DRA7xx.
@@ -36,10 +34,10 @@ config PCI_DRA7XX_HOST
This uses the DesignWare core.
config PCI_DRA7XX_EP
- bool "TI DRA7xx PCIe controller Endpoint Mode"
+ tristate "TI DRA7xx PCIe controller Endpoint Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_DRA7XX
help
@@ -55,7 +53,7 @@ config PCIE_DW_PLAT
config PCIE_DW_PLAT_HOST
bool "Platform bus based DesignWare PCIe Controller - Host mode"
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_DW_PLAT
help
@@ -83,10 +81,15 @@ config PCIE_DW_PLAT_EP
selected.
config PCI_EXYNOS
- bool "Samsung Exynos PCIe controller"
- depends on SOC_EXYNOS5440 || COMPILE_TEST
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ help
+	  Enables support for the PCIe controller in Samsung Exynos SoCs to
+	  work in host mode. The PCIe controller is based on the DesignWare
+	  hardware, so the driver re-uses the DesignWare core functions.
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
@@ -107,11 +110,10 @@ config PCI_KEYSTONE
config PCI_KEYSTONE_HOST
bool "PCI Keystone Host Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_KEYSTONE
- default y
help
Enables support for the PCIe controller in the Keystone SoC to
work in host mode. The PCI controller on Keystone is based on
@@ -120,7 +122,7 @@ config PCI_KEYSTONE_HOST
config PCI_KEYSTONE_EP
bool "PCI Keystone Endpoint Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_KEYSTONE
@@ -134,8 +136,8 @@ config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller - Host mode"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select MFD_SYSCON
select PCIE_DW_HOST
+ select MFD_SYSCON
help
Say Y here if you want to enable PCIe controller support on Layerscape
SoCs to work in Host mode.
@@ -170,11 +172,22 @@ config PCIE_QCOM
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select CRC8
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
hardware wrappers.
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller - Endpoint mode"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU || COMPILE_TEST
@@ -209,9 +222,59 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+	  Enables support for the DesignWare PCIe controller in Rockchip
+	  SoCs other than the RK3399.
+
+config PCIE_INTEL_GW
+ bool "Intel Gateway PCIe host controller support"
+ depends on OF && (X86 || COMPILE_TEST)
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say 'Y' here to enable PCIe Host controller support on Intel
+ Gateway SoCs.
+ The PCIe controller uses the DesignWare core plus Intel-specific
+ hardware wrappers.
+
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller - Host mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller - Endpoint mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
- bool "HiSilicon Kirin series SoCs PCIe controllers"
+ tristate "HiSilicon Kirin series SoCs PCIe controllers"
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
@@ -227,7 +290,8 @@ config PCIE_HISI_STB
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
- bool "MESON PCIe controller"
+ tristate "MESON PCIe controller"
+ default m if ARCH_MESON
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
@@ -237,30 +301,73 @@ config PCI_MESON
implement the driver.
config PCIE_TEGRA194
- tristate "NVIDIA Tegra194 (and later) PCIe controller"
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
help
- Say Y here if you want support for DesignWare core based PCIe host
- controller found in NVIDIA Tegra194 SoC.
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+	  Say Y here if you want PCIe controller support on the Toshiba
+	  Visconti SoC. This driver supports the TMPV7708 SoC.
config PCIE_UNIPHIER
- bool "Socionext UniPhier PCIe controllers"
+ bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on UniPhier SoCs.
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
This driver supports LD20 and PXs3 SoCs.
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe endpoint controllers"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+	  UniPhier SoCs. This driver supports the Pro5 SoC.
+
config PCIE_AL
bool "Amazon Annapurna Labs PCIe controller"
depends on OF && (ARM64 || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_ECAM
help
	  Say Y here to enable support for Amazon's Annapurna Labs PCIe
controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
@@ -268,4 +375,13 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_FU740
+ bool "SiFive FU740 PCIe host controller"
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on SOC_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
+
endmenu
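
A Kconfig symbol flipped from bool to tristate only pays off once the driver
itself can be built and loaded as a module. A minimal, self-contained sketch
of the boilerplate each converted glue driver gains (names here are
illustrative; the pci-dra7xx.c and pci-exynos.c hunks below show the real
conversions):

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static const struct of_device_id foo_pcie_of_match[] = {
		{ .compatible = "vendor,foo-pcie" },	/* hypothetical compatible */
		{ },
	};
	/* lets udev autoload the module from the DT compatible string */
	MODULE_DEVICE_TABLE(of, foo_pcie_of_match);

	static int foo_pcie_probe(struct platform_device *pdev)
	{
		return 0;	/* real drivers map resources and init the DWC core here */
	}

	static struct platform_driver foo_pcie_driver = {
		/* a .probe member replaces builtin_platform_driver_probe() */
		.probe = foo_pcie_probe,
		.driver = {
			.name = "foo-pcie",
			.of_match_table = foo_pcie_of_match,
		},
	};
	module_platform_driver(foo_pcie_driver);

	MODULE_DESCRIPTION("Illustrative skeleton of a modular DWC glue driver");
	MODULE_LICENSE("GPL v2");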
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 69faff371f11..8ba7b67f5e50 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -5,19 +5,26 @@ obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
@@ -29,7 +36,13 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
+endif
endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index b20651cea09f..a4221f6f3629 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -2,11 +2,12 @@
/*
* pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
*
- * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Kishon Vijay Abraham I <[email protected]>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -14,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
@@ -73,8 +74,6 @@
#define LINK_UP BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
-#define EXP_CAP_ID_OFFSET 0x70
-
#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
@@ -91,8 +90,8 @@ struct dra7xx_pcie {
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
- int link_gen;
struct irq_domain *irq_domain;
+ struct clk *clk;
enum dw_pcie_device_mode mode;
};
@@ -142,33 +141,12 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pci)) {
dev_err(dev, "link is already up\n");
return 0;
}
- if (dra7xx->link_gen == 1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, reg);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, reg);
- }
- }
-
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
@@ -205,20 +183,11 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- dw_pcie_setup_rc(pp);
-
- dra7xx_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
return 0;
}
-static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
-};
-
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -233,59 +202,87 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+ unsigned long val;
+ int pos;
- if (!pcie_intc_node) {
- dev_err(dev, "No PCIe Intc node found\n");
- return -ENODEV;
- }
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
- of_node_put(pcie_intc_node);
- if (!dra7xx->irq_domain) {
- dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
- return 0;
+ return 1;
}
-static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
- struct dra7xx_pcie *dra7xx = arg;
- struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+	/*
+	 * Need to make sure all MSI status bits read 0 before exiting.
+	 * Else, new MSI IRQs are not registered by the wrapper. Have an
+	 * upper bound for the loop and exit the IRQ in case of IRQ flood
+	 * to avoid locking up the system in interrupt context.
+	 */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
unsigned long reg;
- u32 virq, bit;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
switch (reg) {
case MSI:
- dw_handle_msi_irq(pp);
+ dra7xx_pcie_handle_msi_irq(pp);
break;
case INTA:
case INTB:
case INTC:
case INTD:
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_find_mapping(dra7xx->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
break;
}
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
-
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
@@ -347,6 +344,36 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .host_init = dra7xx_pcie_host_init,
+};
+
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -414,35 +441,26 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = dra7xx_pcie_get_features,
};
-static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
- struct platform_device *pdev)
+static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
{
int ret;
struct dw_pcie_ep *ep;
- struct resource *res;
struct device *dev = &pdev->dev;
struct dw_pcie *pci = dra7xx->pci;
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ pci->dbi_base2 =
+ devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
@@ -452,35 +470,26 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return 0;
}
-static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
- struct platform_device *pdev)
+static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
- struct resource *res;
pp->irq = platform_get_irq(pdev, 1);
- if (pp->irq < 0) {
- dev_err(dev, "missing IRQ resource\n");
+ if (pp->irq < 0)
return pp->irq;
- }
- ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
+ /* MSI IRQ is muxed */
+ pp->msi_irq = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
@@ -600,6 +609,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
/*
* dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
@@ -671,7 +681,7 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
return 0;
}
-static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+static int dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
int ret;
@@ -681,7 +691,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct phy **phy;
struct device_link **link;
void __iomem *base;
- struct resource *res;
struct dw_pcie *pci;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
@@ -713,15 +722,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 0) {
@@ -737,6 +743,15 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!link)
return -ENOMEM;
+ dra7xx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(dra7xx->clk))
+ return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
+ "clock request failed");
+
+ ret = clk_prepare_enable(dra7xx->clk);
+ if (ret)
+ return ret;
+
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
@@ -787,10 +802,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- dra7xx->link_gen = of_pci_get_max_link_speed(np);
- if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
- dra7xx->link_gen = 2;
-
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
@@ -841,9 +852,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
return 0;
err_gpio:
- pm_runtime_put(dev);
-
err_get_sync:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
@@ -927,6 +937,8 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
+
+ clk_disable_unprepare(dra7xx->clk);
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
@@ -936,6 +948,7 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
};
static struct platform_driver dra7xx_pcie_driver = {
+ .probe = dra7xx_pcie_probe,
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
@@ -944,4 +957,8 @@ static struct platform_driver dra7xx_pcie_driver = {
},
.shutdown = dra7xx_pcie_shutdown,
};
-builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+module_platform_driver(dra7xx_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
+MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
+MODULE_LICENSE("GPL v2");
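
The pci-dra7xx.c hunks above (and the pci-keystone.c hunks further down)
replace the two-step irq_find_mapping()/generic_handle_irq() lookup with
generic_handle_domain_irq(), which resolves the hwirq to its Linux interrupt
and dispatches it in one call. A minimal before/after fragment, assuming
`domain` and `hwirq` are already in hand:

	/* before: resolve the hwirq to a Linux virq, then dispatch it */
	unsigned int virq = irq_find_mapping(domain, hwirq);
	if (virq)
		generic_handle_irq(virq);

	/* after: lookup and dispatch in one call; fails cleanly if unmapped */
	generic_handle_domain_irq(domain, hwirq);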
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 14a6ba4067fb..c24dab383654 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -1,27 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * PCIe host controller driver for Samsung Exynos SoCs
*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
*
* Author: Jingoo Han <[email protected]>
+ * Jaehoon Chung <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/types.h>
+#include <linux/regulator/consumer.h>
#include "pcie-designware.h"
@@ -37,104 +34,43 @@
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
-#define IRQ_MSI_ENABLE BIT(2)
#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
#define PCIE_CORE_RESET 0x01c
#define PCIE_CORE_RESET_ENABLE BIT(0)
#define PCIE_STICKY_RESET 0x020
#define PCIE_NONSTICKY_RESET 0x024
#define PCIE_APP_INIT_RESET 0x028
#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
#define PCIE_ELBI_LTSSM_ENABLE 0x1
#define PCIE_ELBI_SLV_AWMISC 0x11c
#define PCIE_ELBI_SLV_ARMISC 0x120
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
-struct exynos_pcie_mem_res {
- void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
-};
-
-struct exynos_pcie_clk_res {
- struct clk *clk;
- struct clk *bus_clk;
-};
-
struct exynos_pcie {
- struct dw_pcie *pci;
- struct exynos_pcie_mem_res *mem_res;
- struct exynos_pcie_clk_res *clk_res;
- const struct exynos_pcie_ops *ops;
- int reset_gpio;
-
+ struct dw_pcie pci;
+ void __iomem *elbi_base;
+ struct clk *clk;
+ struct clk *bus_clk;
struct phy *phy;
+ struct regulator_bulk_data supplies[2];
};
-struct exynos_pcie_ops {
- int (*get_mem_resources)(struct platform_device *pdev,
- struct exynos_pcie *ep);
- int (*get_clk_resources)(struct exynos_pcie *ep);
- int (*init_clk_resources)(struct exynos_pcie *ep);
- void (*deinit_clk_resources)(struct exynos_pcie *ep);
-};
-
-static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
- struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
- struct resource *res;
-
- ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
- if (!ep->mem_res)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep->mem_res->elbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ep->mem_res->elbi_base))
- return PTR_ERR(ep->mem_res->elbi_base);
-
- return 0;
-}
-
-static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
- if (!ep->clk_res)
- return -ENOMEM;
-
- ep->clk_res->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk_res->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk_res->clk);
- }
-
- ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->clk_res->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->clk_res->bus_clk);
- }
-
- return 0;
-}
-
-static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
+ struct device *dev = ep->pci.dev;
int ret;
- ret = clk_prepare_enable(ep->clk_res->clk);
+ ret = clk_prepare_enable(ep->clk);
if (ret) {
dev_err(dev, "cannot enable pcie rc clock");
return ret;
}
- ret = clk_prepare_enable(ep->clk_res->bus_clk);
+ ret = clk_prepare_enable(ep->bus_clk);
if (ret) {
dev_err(dev, "cannot enable pcie bus clock");
goto err_bus_clk;
@@ -143,24 +79,17 @@ static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
return 0;
err_bus_clk:
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->clk);
return ret;
}
-static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
- clk_disable_unprepare(ep->clk_res->bus_clk);
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->bus_clk);
+ clk_disable_unprepare(ep->clk);
}
-static const struct exynos_pcie_ops exynos5440_pcie_ops = {
- .get_mem_resources = exynos5440_pcie_get_mem_resources,
- .get_clk_resources = exynos5440_pcie_get_clk_resources,
- .init_clk_resources = exynos5440_pcie_init_clk_resources,
- .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources,
-};
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -175,115 +104,71 @@ static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
}
-static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- if (ep->reset_gpio >= 0)
- devm_gpio_request_one(dev, ep->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
-}
-
-static int exynos_pcie_establish_link(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
- return 0;
- }
-
- exynos_pcie_assert_core_reset(ep);
-
- phy_reset(ep->phy);
-
- exynos_pcie_writel(ep->mem_res->elbi_base, 1,
- PCIE_PWR_RESET);
-
- phy_power_on(ep->phy);
- phy_init(ep->phy);
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
- exynos_pcie_deassert_core_reset(ep);
- dw_pcie_setup_rc(pp);
- exynos_pcie_assert_reset(ep);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- phy_power_off(ep->phy);
- return -ETIMEDOUT;
+ return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val;
-
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
-}
-
-static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
-{
- u32 val;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -294,26 +179,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void exynos_pcie_msi_init(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val;
-
- dw_pcie_msi_init(pp);
-
- /* enable MSI interrupt */
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
- val |= IRQ_MSI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
-}
-
-static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
- exynos_pcie_enable_irq_pulse(ep);
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(ep);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -338,42 +211,43 @@ static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_r_mode(ep, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_w_mode(ep, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
+
static int exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val;
-
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return 0;
+ return (val & PCIE_ELBI_XMLH_LINKUP);
}
static int exynos_pcie_host_init(struct pcie_port *pp)
@@ -381,47 +255,45 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
- exynos_pcie_establish_link(ep);
- exynos_pcie_enable_interrupts(ep);
+ pp->bridge->ops = &exynos_pci_ops;
+
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_reset(ep->phy);
+ phy_power_on(ep->phy);
+ phy_init(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
return 0;
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .rd_own_conf = exynos_pcie_rd_own_conf,
- .wr_own_conf = exynos_pcie_wr_own_conf,
.host_init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
- struct dw_pcie *pci = ep->pci;
+ struct dw_pcie *pci = &ep->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
- pp->irq = platform_get_irq(pdev, 1);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
return pp->irq;
- }
+
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", ep);
+ IRQF_SHARED, "exynos-pcie", ep);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get msi irq\n");
- return pp->msi_irq;
- }
- }
-
pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -436,12 +308,12 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = exynos_pcie_read_dbi,
.write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
};
-static int __init exynos_pcie_probe(struct platform_device *pdev)
+static int exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
struct exynos_pcie *ep;
struct device_node *np = dev->of_node;
int ret;
@@ -450,43 +322,45 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
if (!ep)
return -ENOMEM;
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- ep->pci = pci;
- ep->ops = (const struct exynos_pcie_ops *)
- of_device_get_match_data(dev);
-
- ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
ep->phy = devm_of_phy_get(dev, np, NULL);
- if (IS_ERR(ep->phy)) {
- if (PTR_ERR(ep->phy) != -ENODEV)
- return PTR_ERR(ep->phy);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
- ep->phy = NULL;
- }
+ /* External Local Bus interface (ELBI) registers */
+ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(ep->elbi_base))
+ return PTR_ERR(ep->elbi_base);
- if (ep->ops && ep->ops->get_mem_resources) {
- ret = ep->ops->get_mem_resources(pdev, ep);
- if (ret)
- return ret;
+ ep->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ep->clk);
}
- if (ep->ops && ep->ops->get_clk_resources &&
- ep->ops->init_clk_resources) {
- ret = ep->ops->get_clk_resources(ep);
- if (ret)
- return ret;
- ret = ep->ops->init_clk_resources(ep);
- if (ret)
- return ret;
+ ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(ep->bus_clk)) {
+ dev_err(dev, "Failed to get pcie bus clock\n");
+ return PTR_ERR(ep->bus_clk);
}
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
+
+ ret = exynos_pcie_init_clk_resources(ep);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, ep);
ret = exynos_add_pcie_port(ep, pdev);
@@ -497,9 +371,9 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
return ret;
}
@@ -507,32 +381,65 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return 0;
}
+static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct pcie_port *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
static const struct of_device_id exynos_pcie_of_match[] = {
- {
- .compatible = "samsung,exynos5440-pcie",
- .data = &exynos5440_pcie_ops
- },
- {},
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
};
static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
.remove = __exit_p(exynos_pcie_remove),
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
},
};
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init exynos_pcie_init(void)
-{
- return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-}
-subsys_initcall(exynos_pcie_init);
+module_platform_driver(exynos_pcie_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
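
The exynos conversion above (and the imx6 one below) moves link bring-up out
of host_init and into the dw_pcie .start_link op: after this refactor the DWC
core sequences dw_pcie_setup_rc(), ops->start_link() and
dw_pcie_wait_for_link() itself from dw_pcie_host_init(), so the glue driver
only has to flip its vendor-specific LTSSM-enable bit. A minimal sketch,
assuming a hypothetical glue struct and wrapper-register accessor:

	static int foo_pcie_start_link(struct dw_pcie *pci)
	{
		struct foo_pcie *foo = to_foo_pcie(pci);	/* hypothetical glue struct */

		/* assert LTSSM enable in the vendor wrapper; the core polls link-up */
		foo_elbi_writel(foo, LTSSM_EN, FOO_APP_LTSSM_ENABLE);
		return 0;
	}

	static const struct dw_pcie_ops foo_dw_pcie_ops = {
		.start_link = foo_pcie_start_link,
		/* .link_up is typically provided too, so the core's wait can poll it */
	};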
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index acfbd34032a8..26f49f797b0f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -3,7 +3,7 @@
* PCIe host controller driver for Freescale i.MX6 SoCs
*
* Copyright (C) 2013 Kosagi
- * http://www.kosagi.com
+ * https://www.kosagi.com
*
* Author: Sean Cross <[email protected]>
*/
@@ -37,6 +37,7 @@
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
@@ -79,8 +80,8 @@ struct imx6_pcie {
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
- int link_gen;
struct regulator *vpcie;
+ struct regulator *vph;
void __iomem *phy_base;
/* power domain for pcie */
@@ -94,15 +95,6 @@ struct imx6_pcie {
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
-/* PCIe Root Complex registers (memory-mapped) */
-#define PCIE_RC_IMX6_MSI_CAP 0x50
-#define PCIE_RC_LCR 0x7c
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
-
-#define PCIE_RC_LCSR 0x80
-
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
@@ -116,8 +108,6 @@ struct imx6_pcie {
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK BIT(16)
-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
#define PCIE_PHY_ATEOVRD_EN BIT(2)
@@ -439,7 +429,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP:
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -633,6 +623,17 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
imx6_pcie_grp_offset(imx6_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
+			/*
+			 * According to the datasheet, the PCIE_VPH is suggested
+			 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
+			 * VREG_BYPASS should be cleared to zero.
+			 */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
break;
case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -642,7 +643,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
- /* FALLTHROUGH */
+ fallthrough;
default:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -757,10 +758,11 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
-static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
@@ -769,10 +771,10 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
@@ -781,12 +783,12 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
if (ret)
goto err_reset_phy;
- if (imx6_pcie->link_gen == 2) {
+ if (pci->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
* Start Directed Speed Change so the best possible
@@ -824,8 +826,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dev_info(dev, "Link: Gen2 disabled\n");
}
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
- dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+ tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -845,11 +847,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
imx6_setup_phy_mpll(imx6_pcie);
- dw_pcie_setup_rc(pp);
- imx6_pcie_establish_link(imx6_pcie);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
return 0;
}
@@ -858,35 +855,8 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq <= 0) {
- dev_err(dev, "failed to get MSI irq\n");
- return -ENODEV;
- }
- }
-
- pp->ops = &imx6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
- /* No special ops needed, but pcie-designware still expects this struct */
+ .start_link = imx6_pcie_start_link,
};
#ifdef CONFIG_PM_SLEEP
@@ -995,7 +965,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_establish_link(imx6_pcie);
+ ret = imx6_pcie_start_link(imx6_pcie->pci);
if (ret < 0)
dev_info(dev, "pcie link is down after resume.\n");
@@ -1029,6 +999,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &imx6_pcie_host_ops;
imx6_pcie->pci = pci;
imx6_pcie->drvdata = of_device_get_match_data(dev);
@@ -1044,10 +1015,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return ret;
}
imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base)) {
- dev_err(dev, "Unable to map PCIe PHY\n");
+ if (IS_ERR(imx6_pcie->phy_base))
return PTR_ERR(imx6_pcie->phy_base);
- }
}
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,39 +1044,34 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(dev, "pcie_phy clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_phy);
- }
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(dev, "pcie_bus clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_bus);
- }
+ if (IS_ERR(imx6_pcie->pcie_bus))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
+ "pcie_bus clock source missing or invalid\n");
imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(dev, "pcie clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie);
- }
+ if (IS_ERR(imx6_pcie->pcie))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
+ "pcie clock source missing or invalid\n");
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_inbound_axi);
- }
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
+ "pcie_inbound_axi clock missing or invalid\n");
break;
case IMX8MQ:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux)) {
- dev_err(dev, "pcie_aux clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_aux);
- }
- /* fall through */
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ fallthrough;
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx6_pcie->controller_id = 1;
@@ -1167,10 +1131,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(node, "fsl,max-link-speed",
- &imx6_pcie->link_gen);
- if (ret)
- imx6_pcie->link_gen = 1;
+ pci->link_gen = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx6_pcie->vpcie)) {
@@ -1179,22 +1141,28 @@ static int imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->vpcie = NULL;
}
+ imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx6_pcie->vph)) {
+ if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vph);
+ imx6_pcie->vph = NULL;
+ }
+
platform_set_drvdata(pdev, imx6_pcie);
ret = imx6_pcie_attach_pd(dev);
if (ret)
return ret;
- ret = imx6_add_pcie_port(imx6_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
if (pci_msi_enabled()) {
- val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
- PCI_MSI_FLAGS);
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
- val);
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
}
return 0;
@@ -1225,6 +1193,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.variant = IMX6QP,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .dbi_length = 0x200,
},
[IMX7D] = {
.variant = IMX7D,
@@ -1269,7 +1238,7 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
return;
- if (bus->number == pp->root_bus_nr) {
+ if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
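
The imx6 hunks above drop hard-coded config-space offsets such as PCIE_RC_LCR
in favour of looking up the capability at run time with
dw_pcie_find_capability() (pci-keystone.c below removes its EXP_CAP_ID_OFFSET
the same way, as did pci-dra7xx.c earlier). A minimal fragment of the pattern,
assuming `pci` is a `struct dw_pcie *`, mirroring the Gen1 clamp in the imx6
diff:

	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 cap;

	/* clamp the advertised link speed to Gen1 at the discovered LNKCAP offset */
	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	cap &= ~PCI_EXP_LNKCAP_SLS;
	cap |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap);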
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index af677254a072..865258d8c53c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -3,7 +3,7 @@
* PCIe host controller driver for Texas Instruments Keystone SoCs
*
* Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
+ * https://www.ti.com
*
* Author: Murali Karicheri <[email protected]>
* Implementation based on pci-exynos.c and pcie-designware.c
@@ -96,8 +96,6 @@
#define LEG_EP 0x1
#define RC 0x2
-#define EXP_CAP_ID_OFFSET 0x70
-
#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
#define AM654_PCIE_DEV_TYPE_MASK 0x3
@@ -261,28 +259,18 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
u32 pending;
- int virq;
pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, ": irq: irq_offset %d", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
}
/* EOI the INTx interrupt */
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
-{
- return 0;
-}
-
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
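
generic_handle_domain_irq() collapses the irq_linear_revmap() plus generic_handle_irq() pair into one call that looks up the hardware IRQ in the domain and dispatches it, returning -EINVAL when no mapping exists. The shape of the conversion, as a fragment with domain and hwirq standing in for the driver's values:

	/* before: a failed revmap returned virq 0, which was still "handled" */
	virq = irq_linear_revmap(domain, hwirq);
	generic_handle_irq(virq);

	/* after: lookup and dispatch in one step, with error reporting */
	ret = generic_handle_domain_irq(domain, hwirq);
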
@@ -356,8 +344,9 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
};
/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -377,6 +366,8 @@ static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
/**
* ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -400,10 +391,14 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
- u64 start = pp->mem->start;
- u64 end = pp->mem->end;
+ u64 start, end;
+ struct resource *mem;
int i;
+ mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ start = mem->start;
+ end = mem->end;
+
/* Disable BARs for inbound access */
ks_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -422,7 +417,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
lower_32_bits(start) | OB_ENABLEN);
ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
upper_32_bits(start));
- start += OB_WIN_SIZE;
+ start += OB_WIN_SIZE * SZ_1M;
}
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
@@ -430,50 +425,44 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
-static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number != pp->root_bus_nr)
+ if (!pci_is_root_bus(bus->parent))
reg |= CFG_TYPE1;
ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
- return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+ return pp->va_cfg0_base + where;
}
-static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 reg;
-
- reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
- CFG_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number != pp->root_bus_nr)
- reg |= CFG_TYPE1;
- ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
-
- return dw_pcie_write(pp->va_cfg0_base + where, size, val);
-}
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
/**
- * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ * @bus: A pointer to the PCI bus structure.
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
-static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ if (!pci_is_root_bus(bus))
+ return 0;
+
/* Configure and set up BAR0 */
ks_pcie_set_dbi_mode(ks_pcie);
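
The driver-specific rd_other_conf()/wr_other_conf() callbacks become a struct pci_ops whose .map_bus hook programs CFG_SETUP for the target bus/devfn and returns the mapped config address; pci_generic_config_read()/write() then perform the actual access. A stripped-down sketch of the pattern, assuming bus->sysdata carries the pcie_port as set up by the DWC core:

	static void __iomem *example_map_bus(struct pci_bus *bus,
					     unsigned int devfn, int where)
	{
		struct pcie_port *pp = bus->sysdata;

		/* route the access in controller registers here */
		return pp->va_cfg0_base + where;	/* NULL fails the access */
	}

	static struct pci_ops example_child_ops = {
		.map_bus = example_map_bus,
		.read	 = pci_generic_config_read,
		.write	 = pci_generic_config_write,
	};
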
@@ -488,10 +477,21 @@ static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
* be sufficient. Use physical address to avoid any conflicts.
*/
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ return 0;
}
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = ks_pcie_v3_65_add_bus,
+};
+
/**
* ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
*/
static int ks_pcie_link_up(struct dw_pcie *pci)
{
@@ -510,20 +510,14 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
static int ks_pcie_start_link(struct dw_pcie *pci)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- struct device *dev = pci->dev;
u32 val;
- if (dw_pcie_link_up(pci)) {
- dev_dbg(dev, "link is already up\n");
- return 0;
- }
-
/* Initiate Link Training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
@@ -583,7 +577,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 vector, virq, reg, pos;
+ u32 vector, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -604,10 +598,8 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
continue;
vector = offset + (pos << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
- virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
}
chained_irq_exit(chip, desc);
@@ -615,7 +607,6 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
/**
* ks_pcie_legacy_irq_handler() - Handle legacy interrupt
- * @irq: IRQ line for legacy interrupts
* @desc: Pointer to irq descriptor
*
* Traverse through pending legacy interrupts and invoke handler for each. Also
@@ -807,6 +798,10 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ pp->bridge->ops = &ks_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
return ret;
@@ -815,8 +810,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
- dw_pcie_setup_rc(pp);
-
ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
@@ -835,23 +828,16 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
"Asynchronous external abort");
#endif
- ks_pcie_start_link(pci);
- dw_pcie_wait_for_link(pci);
-
return 0;
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .rd_other_conf = ks_pcie_rd_other_conf,
- .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
.msi_host_init = ks_pcie_msi_host_init,
- .scan_bus = ks_pcie_v3_65_scan_bus,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
.host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_am654_msi_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -861,43 +847,6 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int ret;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
-
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
- u32 reg, size_t size)
-{
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
-
- ks_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_read(base + reg, size, &val);
- ks_pcie_clear_dbi_mode(ks_pcie);
- return val;
-}
-
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
@@ -912,7 +861,6 @@ static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
.start_link = ks_pcie_start_link,
.stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
- .read_dbi2 = ks_pcie_am654_read_dbi2,
.write_dbi2 = ks_pcie_am654_write_dbi2,
};
@@ -959,6 +907,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
@@ -970,7 +921,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
+ .msix_capable = true,
.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
.bar_fixed_64bit = 1 << BAR_0,
.bar_fixed_size[2] = SZ_1M,
@@ -992,33 +943,6 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
.get_features = &ks_pcie_am654_get_features,
};
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = ks_pcie->pci;
-
- ep = &pci->ep;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -1122,31 +1046,6 @@ static int ks_pcie_am654_set_mode(struct device *dev,
return 0;
}
-static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
-{
- u32 val;
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
- val);
- }
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
- val);
- }
-
- dw_pcie_dbi_ro_wr_dis(pci);
-}
-
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
.version = 0x365A,
@@ -1194,13 +1093,11 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct keystone_pcie *ks_pcie;
struct device_link **link;
struct gpio_desc *gpiod;
- void __iomem *atu_base;
struct resource *res;
unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
- int link_speed;
u32 num_lanes;
char name[10];
int ret;
@@ -1247,10 +1144,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
pci->version = version;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
"ks-pcie-error-irq", ks_pcie);
@@ -1319,30 +1214,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- atu_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(atu_base)) {
- ret = PTR_ERR(atu_base);
- goto err_get_sync;
- }
-
- pci->atu_base = atu_base;
-
+ if (pci->version >= 0x480A)
ret = ks_pcie_am654_set_mode(dev, mode);
- if (ret < 0)
- goto err_get_sync;
- } else {
+ else
ret = ks_pcie_set_mode(dev);
- if (ret < 0)
- goto err_get_sync;
- }
-
- link_speed = of_pci_get_max_link_speed(np);
- if (link_speed < 0)
- link_speed = 2;
-
- ks_pcie_set_link_speed(pci, link_speed);
+ if (ret < 0)
+ goto err_get_sync;
switch (mode) {
case DW_PCIE_RC_TYPE:
@@ -1354,7 +1231,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "num-viewport", &num_viewport);
if (ret < 0) {
dev_err(dev, "unable to read *num-viewport* property\n");
- return ret;
+ goto err_get_sync;
}
/*
@@ -1372,7 +1249,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->num_viewport = num_viewport;
pci->pp.ops = host_ops;
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
goto err_get_sync;
break;
@@ -1383,7 +1260,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
}
pci->ep.ops = ep_ops;
- ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
break;
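
With dw_pcie_host_init() now mapping the "config" window and dw_pcie_ep_init() mapping "addr_space" on their own, the ks_pcie_add_pcie_port()/ks_pcie_add_pcie_ep() wrappers reduce to direct core calls. A condensed sketch of the resulting probe tail, error paths elided:

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);	/* core maps "config" */
		break;
	case DW_PCIE_EP_TYPE:
		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);	/* core maps "addr_space" */
		break;
	}
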
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 0d151cead1b7..39f4664bd84c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,52 +18,58 @@
#include "pcie-designware.h"
-#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
-struct ls_pcie_ep {
- struct dw_pcie *pci;
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
};
-#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+};
static int ls_pcie_establish_link(struct dw_pcie *pci)
{
return 0;
}
-static const struct dw_pcie_ops ls_pcie_ep_ops = {
+static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
.start_link = ls_pcie_establish_link,
};
-static const struct of_device_id ls_pcie_ep_of_match[] = {
- { .compatible = "fsl,ls-pcie-ep",},
- { },
-};
-
-static const struct pci_epc_features ls_pcie_epc_features = {
- .linkup_notifier = false,
- .msi_capable = true,
- .msix_capable = false,
- .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &ls_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
}
static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
enum pci_barno bar;
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -73,54 +79,63 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
}
}
-static const struct dw_pcie_ep_ops pcie_ep_ops = {
+static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
+ .func_conf_select = ls_pcie_ep_func_conf_select,
};
-static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- struct dw_pcie_ep *ep;
- struct resource *res;
- int ret;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
+static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
+ .func_offset = 0x8000,
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
- return 0;
-}
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
+ { },
+};
static int __init ls_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -130,21 +145,30 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
- pci->dev = dev;
- pci->ops = &ls_pcie_ep_ops;
- pcie->pci = pci;
+ pci->ep.ops = &ls_pcie_ep_ops;
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_ep(pcie, pdev);
-
- return ret;
+ return dw_pcie_ep_init(&pci->ep);
}
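
Multi-function Layerscape endpoints replicate each physical function's config registers at a fixed stride (0x20000 for the ls2 parts, 0x8000 for lx2160ar2, per the drvdata above). The .func_conf_select hook hands that stride to the DWC endpoint core, which adds it before every DBI access for the given function. A sketch with the stride as a placeholder value:

	/* returning 0 keeps single-function controllers unchanged */
	static unsigned int example_func_conf_select(struct dw_pcie_ep *ep,
						     u8 func_no)
	{
		const unsigned int stride = 0x20000;	/* per-function offset */

		return stride * func_no;
	}
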
static struct platform_driver ls_pcie_ep_driver = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index f24f79a70d9a..5b9c625df7b8 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -83,14 +83,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
-{
- int i;
-
- for (i = 0; i < PCIE_IATU_NUM; i++)
- dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
-}
-
static int ls1021_pcie_link_up(struct dw_pcie *pci)
{
u32 state;
@@ -136,12 +128,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- /*
- * Disable outbound windows configured by the bootloader to avoid
- * one transaction hitting multiple outbound windows.
- * dw_pcie_setup_rc() will reconfigure the outbound windows.
- */
- ls_pcie_disable_outbound_atus(pcie);
ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
@@ -150,8 +136,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
ls_pcie_drop_msg_tlp(pcie);
- dw_pcie_setup_rc(pp);
-
return 0;
}
@@ -182,37 +166,12 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ls_pcie_host_init(pp);
}
-static int ls_pcie_msi_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct device_node *msi_node;
-
- /*
- * The MSI domain is set by the generic of_msi_configure(). This
- * .msi_host_init() function keeps us from doing the default MSI
- * domain setup in dw_pcie_host_init() and also enforces the
- * requirement that "msi-parent" exists.
- */
- msi_node = of_parse_phandle(np, "msi-parent", 0);
- if (!msi_node) {
- dev_err(dev, "failed to find msi-parent\n");
- return -EINVAL;
- }
-
- of_node_put(msi_node);
- return 0;
-}
-
static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
.host_init = ls1021_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
@@ -273,31 +232,12 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_add_pcie_port(struct ls_pcie *pcie)
-{
- struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- pp->ops = pcie->drvdata->ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static int __init ls_pcie_probe(struct platform_device *pdev)
+static int ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -311,6 +251,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
+ pci->pp.ops = pcie->drvdata->ops;
pcie->pci = pci;
@@ -326,18 +267,15 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_port(pcie);
- if (ret < 0)
- return ret;
-
- return 0;
+ return dw_pcie_host_init(&pci->pp);
}
static struct platform_driver ls_pcie_driver = {
+ .probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
+builtin_platform_driver(ls_pcie_driver);
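
builtin_platform_driver_probe() registers via platform_driver_probe(), which binds only devices present at init time and allows the probe routine to live in __init memory; keeping .probe in the platform_driver instead permits late device binding, which is why the __init annotation is dropped above. The conversion pattern in isolation:

	/* probe must stay resident, so no __init, and it sits in the ops */
	static int example_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe = example_probe,
		.driver = {
			.name = "example",
		},
	};
	builtin_platform_driver(example_driver);
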
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 3772b02a5c55..686ded034f22 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -17,37 +17,13 @@
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/phy/phy.h>
+#include <linux/module.h>
#include "pcie-designware.h"
#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
-/* External local bus interface registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
-#define FAST_LINK_MODE BIT(7)
-#define LINK_CAPABLE_MASK GENMASK(21, 16)
-#define LINK_CAPABLE_X1 BIT(16)
-
-#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
-#define NUM_OF_LANES_MASK GENMASK(12, 8)
-#define NUM_OF_LANES_X1 BIT(8)
-#define DIRECT_SPEED_CHANGE BIT(17)
-
-#define TYPE1_HDR_OFFSET 0x0
-#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
-#define PCI_IO_EN BIT(0)
-#define PCI_MEM_SPACE_EN BIT(1)
-#define PCI_BUS_MASTER_EN BIT(2)
-
-#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
-#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
-
-#define PCIE_CAP_OFFSET 0x70
-#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
-#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
-#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
/* PCIe specific config registers */
@@ -66,7 +42,6 @@
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
-#define MESON_PCIE_PHY_POWERUP 0x1c
#define PCIE_RESET_DELAY 500
#define PCIE_SHARED_RESET 1
#define PCIE_NORMAL_RESET 0
@@ -78,37 +53,24 @@ enum pcie_data_rate {
PCIE_GEN4
};
-struct meson_pcie_mem_res {
- void __iomem *elbi_base;
- void __iomem *cfg_base;
- void __iomem *phy_base;
-};
-
struct meson_pcie_clk_res {
struct clk *clk;
- struct clk *mipi_gate;
struct clk *port_clk;
struct clk *general_clk;
};
struct meson_pcie_rc_reset {
- struct reset_control *phy;
struct reset_control *port;
struct reset_control *apb;
};
-struct meson_pcie_param {
- bool has_shared_phy;
-};
-
struct meson_pcie {
struct dw_pcie pci;
- struct meson_pcie_mem_res mem_res;
+ void __iomem *cfg_base;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
struct phy *phy;
- const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -130,13 +92,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- if (!mp->param->has_shared_phy) {
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
- }
-
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
return PTR_ERR(mrst->port);
@@ -150,52 +105,18 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
return 0;
}
-static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
-
- return devm_ioremap_resource(dev, res);
-}
-
-static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
- if (!res) {
- dev_err(dev, "No REG resource %s\n", id);
- return ERR_PTR(-ENXIO);
- }
-
- return devm_ioremap(dev, res->start, resource_size(res));
-}
-
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
- mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
- if (IS_ERR(mp->mem_res.elbi_base))
- return PTR_ERR(mp->mem_res.elbi_base);
-
- mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
- if (IS_ERR(mp->mem_res.cfg_base))
- return PTR_ERR(mp->mem_res.cfg_base);
-
- /* Meson AXG SoC has two PCI controllers use same phy register */
- if (!mp->param->has_shared_phy) {
- mp->mem_res.phy_base =
- meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
- }
+ struct dw_pcie *pci = &mp->pci;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
return 0;
}
@@ -204,37 +125,33 @@ static int meson_pcie_power_on(struct meson_pcie *mp)
{
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_init(mp->phy);
- if (ret)
- return ret;
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
- ret = phy_power_on(mp->phy);
- if (ret) {
- phy_exit(mp->phy);
- return ret;
- }
- } else
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
return 0;
}
+static void meson_pcie_power_off(struct meson_pcie *mp)
+{
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+}
+
static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_reset(mp->phy);
- if (ret)
- return ret;
- } else {
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- }
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -286,12 +203,6 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- if (!mp->param->has_shared_phy) {
- res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
- }
-
res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
@@ -303,24 +214,14 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
return 0;
}
-static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
-{
- writel(val, mp->mem_res.elbi_base + reg);
-}
-
-static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
-{
- return readl(mp->mem_res.elbi_base + reg);
-}
-
static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
- return readl(mp->mem_res.cfg_base + reg);
+ return readl(mp->cfg_base + reg);
}
static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
- writel(val, mp->mem_res.cfg_base + reg);
+ writel(val, mp->cfg_base + reg);
}
static void meson_pcie_assert_reset(struct meson_pcie *mp)
@@ -330,32 +231,13 @@ static void meson_pcie_assert_reset(struct meson_pcie *mp)
gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
-static void meson_pcie_init_dw(struct meson_pcie *mp)
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
{
u32 val;
val = meson_cfg_readl(mp, PCIE_CFG0);
val |= APP_LTSSM_ENABLE;
meson_cfg_writel(mp, val, PCIE_CFG0);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~LINK_CAPABLE_MASK;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val &= ~NUM_OF_LANES_MASK;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}
static int meson_size_to_payload(struct meson_pcie *mp, int size)
@@ -377,69 +259,52 @@ static int meson_size_to_payload(struct meson_pcie *mp, int size)
static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_payload_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_rd_req_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
-static inline void meson_enable_memory_space(struct meson_pcie *mp)
+static int meson_pcie_start_link(struct dw_pcie *pci)
{
- /* Set the RC Bus Master, Memory Space and I/O Space enables */
- meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
- PCIE_STATUS_COMMAND);
-}
-
-static int meson_pcie_establish_link(struct meson_pcie *mp)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
-
- meson_pcie_init_dw(mp);
- meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
- meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
-
- dw_pcie_setup_rc(pp);
- meson_enable_memory_space(mp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ meson_pcie_ltssm_enable(mp);
meson_pcie_assert_reset(mp);
- return dw_pcie_wait_for_link(pci);
-}
-
-static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(&mp->pci.pp);
+ return 0;
}
-static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret;
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
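
Max payload and max read request sizes now go through the standard PCI Express capability instead of a fixed 0x70 offset. A sketch of the payload half, assuming pci is an initialized struct dw_pcie and using the driver's own PCIE_CAP_MAX_PAYLOAD_SIZE() encoding, where 1 selects 256 bytes:

	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_PAYLOAD;		/* clear MPS bits 7:5 */
	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(1);	/* 1 => 256-byte payload */
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
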
@@ -460,13 +325,11 @@ static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return PCIBIOS_SUCCESSFUL;
}
-static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
static int meson_pcie_link_up(struct dw_pcie *pci)
{
@@ -511,58 +374,26 @@ static int meson_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
- int ret;
- ret = meson_pcie_establish_link(mp);
- if (ret)
- return ret;
+ pp->bridge->ops = &meson_pci_ops;
- meson_pcie_enable_interrupts(mp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
return 0;
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .rd_own_conf = meson_pcie_rd_own_conf,
- .wr_own_conf = meson_pcie_wr_own_conf,
.host_init = meson_pcie_host_init,
};
-static int meson_add_pcie_port(struct meson_pcie *mp,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get MSI IRQ\n");
- return pp->msi_irq;
- }
- }
-
- pp->ops = &meson_pcie_host_ops;
- pci->dbi_base = mp->mem_res.elbi_base;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
};
static int meson_pcie_probe(struct platform_device *pdev)
{
- const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -575,18 +406,13 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
+ pci->num_lanes = 1;
- match_data = of_device_get_match_data(dev);
- if (!match_data) {
- dev_err(dev, "failed to get match data\n");
- return -ENODEV;
- }
- mp->param = match_data;
-
- if (mp->param->has_shared_phy) {
- mp->phy = devm_phy_get(dev, "pcie");
- if (IS_ERR(mp->phy))
- return PTR_ERR(mp->phy);
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
}
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -627,7 +453,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mp);
- ret = meson_add_pcie_port(mp, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
goto err_phy;
@@ -636,33 +462,20 @@ static int meson_pcie_probe(struct platform_device *pdev)
return 0;
err_phy:
- if (mp->param->has_shared_phy) {
- phy_power_off(mp->phy);
- phy_exit(mp->phy);
- }
-
+ meson_pcie_power_off(mp);
return ret;
}
-static struct meson_pcie_param meson_pcie_axg_param = {
- .has_shared_phy = false,
-};
-
-static struct meson_pcie_param meson_pcie_g12a_param = {
- .has_shared_phy = true,
-};
-
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
- .data = &meson_pcie_axg_param,
},
{
.compatible = "amlogic,g12a-pcie",
- .data = &meson_pcie_g12a_param,
},
{},
};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
static struct platform_driver meson_pcie_driver = {
.probe = meson_pcie_probe,
@@ -672,4 +485,8 @@ static struct platform_driver meson_pcie_driver = {
},
};
-builtin_platform_driver(meson_pcie_driver);
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <[email protected]>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 1eeda2f6371f..e8afa50129a8 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -67,21 +67,15 @@ static int al_pcie_init(struct pci_config_window *cfg)
dev_dbg(dev, "Root port dbi res: %pR\n", res);
al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(al_pcie->dbi_base)) {
- long err = PTR_ERR(al_pcie->dbi_base);
-
- dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
- res, err);
- return err;
- }
+ if (IS_ERR(al_pcie->dbi_base))
+ return PTR_ERR(al_pcie->dbi_base);
cfg->priv = al_pcie;
return 0;
}
-struct pci_ecam_ops al_pcie_ops = {
- .bus_shift = 20,
+const struct pci_ecam_ops al_pcie_ops = {
.init = al_pcie_init,
.pci_ops = {
.map_bus = al_pcie_map_bus,
@@ -143,8 +137,6 @@ struct al_pcie {
struct al_pcie_target_bus_cfg target_bus_cfg;
};
-#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12)
-
#define to_al_pcie(x) dev_get_drvdata((x)->dev)
static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
@@ -222,19 +214,15 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
reg);
}
-static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
- unsigned int busnr,
- unsigned int devfn)
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
- struct pcie_port *pp = &pcie->pci->pp;
- void __iomem *pci_base_addr;
-
- pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
- (busnr_ecam << 20) +
- PCIE_ECAM_DEVFN(devfn));
if (busnr_reg != target_bus_cfg->reg_val) {
dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
@@ -245,52 +233,14 @@ static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
target_bus_cfg->reg_mask);
}
- return pci_base_addr;
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
}
-static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_read(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), *val);
-
- return rc;
-}
-
-static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_write(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), val);
-
- return rc;
-}
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
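
The open-coded (busnr << 20) + PCIE_ECAM_DEVFN(devfn) arithmetic is replaced by the shared PCIE_ECAM_OFFSET() helper, which composes the standard ECAM address layout: bus number in bits 27:20, devfn in bits 19:12, register in bits 11:0. Usage in isolation, with base as a placeholder for the mapped config window:

	void __iomem *cfg = base + PCIE_ECAM_OFFSET(busnr, devfn, where);
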
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
@@ -302,10 +252,11 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
u8 secondary_bus;
u32 cfg_control;
u32 reg;
+ struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
target_bus_cfg = &pcie->target_bus_cfg;
- ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
if (ecam_bus_mask > 255) {
dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
ecam_bus_mask = 255;
@@ -315,13 +266,13 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
target_bus_cfg->ecam_mask = ecam_bus_mask;
/* This portion is taken from the cfg_target_bus reg */
target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
- target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
target_bus_cfg->reg_mask);
- secondary_bus = pp->busn->start + 1;
- subordinate_bus = pp->busn->end;
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
/* Set the valid values of secondary and subordinate buses */
cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
@@ -344,6 +295,8 @@ static int al_pcie_host_init(struct pcie_port *pp)
struct al_pcie *pcie = to_al_pcie(pci);
int rc;
+ pp->bridge->child_ops = &al_child_pci_ops;
+
rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
if (rc)
return rc;
@@ -358,37 +311,14 @@ static int al_pcie_host_init(struct pcie_port *pp)
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .rd_other_conf = al_pcie_rd_other_conf,
- .wr_other_conf = al_pcie_wr_other_conf,
.host_init = al_pcie_host_init,
};
-static int al_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &al_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
-
static int al_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *controller_res;
struct resource *ecam_res;
- struct resource *dbi_res;
struct al_pcie *al_pcie;
struct dw_pcie *pci;
@@ -401,18 +331,11 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &al_pcie_host_ops;
al_pcie->pci = pci;
al_pcie->dev = dev;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base)) {
- dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res);
- return PTR_ERR(pci->dbi_base);
- }
-
ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (!ecam_res) {
dev_err(dev, "couldn't find 'config' reg in DT\n");
@@ -429,12 +352,11 @@ static int al_pcie_probe(struct platform_device *pdev)
return PTR_ERR(al_pcie->controller_base);
}
- dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
- dbi_res, controller_res);
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
platform_set_drvdata(pdev, al_pcie);
- return al_add_pcie_port(&pci->pp, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
static const struct of_device_id al_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 49596547e8c2..4e2552dcf982 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -154,11 +154,23 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = pcie->pci;
u32 reg;
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ return 0;
+}
+
+static int armada8k_pcie_host_init(struct pcie_port *pp)
+{
+ u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
if (!dw_pcie_link_up(pci)) {
/* Disable LTSSM state machine to enable configuration */
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
@@ -193,26 +205,6 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
- if (!dw_pcie_link_up(pci)) {
- /* Configuration done. Start LTSSM */
- reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
- reg |= PCIE_APP_LTSSM_EN;
- dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
- }
-
- /* Wait until the link becomes active again */
- if (dw_pcie_wait_for_link(pci))
- dev_err(pci->dev, "Link not up after reconfiguration\n");
-}
-
-static int armada8k_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
-
- dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pcie);
-
return 0;
}
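
Moving the LTSSM kick into a .start_link callback hands link sequencing to the DWC core: in this series dw_pcie_host_init() invokes start_link() after host_init and then waits for the link itself, which is why the per-driver dw_pcie_wait_for_link() call disappears below. The resulting ops pairing, sketched with a placeholder struct name around the functions from this diff:

	static const struct dw_pcie_ops example_ops = {
		.link_up    = armada8k_pcie_link_up,	/* polled by the core */
		.start_link = armada8k_pcie_start_link,	/* enables LTSSM only */
	};
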
@@ -248,10 +240,8 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq for port\n");
+ if (pp->irq < 0)
return pp->irq;
- }
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
IRQF_SHARED, "armada8k-pcie", pcie);
@@ -271,6 +261,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
};
static int armada8k_pcie_probe(struct platform_device *pdev)
@@ -317,7 +308,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
- dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
goto fail_clkreg;
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9e2482bd7b6d..c91fc1954432 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -44,16 +44,6 @@ struct artpec_pcie_of_data {
static const struct of_device_id artpec6_pcie_of_match[];
-/* PCIe Port Logic registers (memory-mapped) */
-#define PL_OFFSET 0x700
-
-#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
-#define ACK_N_FTS_MASK GENMASK(15, 8)
-#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
-
-#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
-#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
-
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN BIT(24)
@@ -292,33 +282,6 @@ static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
}
}
-static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- u32 val;
-
- if (artpec6_pcie->variant != ARTPEC7)
- return;
-
- /*
- * Increase the N_FTS (Number of Fast Training Sequences)
- * to be transmitted when transitioning from L0s to L0.
- */
- val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
- val &= ~ACK_N_FTS_MASK;
- val |= ACK_N_FTS(180);
- dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
-
- /*
- * Set the Number of Fast Training Sequences that the core
- * advertises as its N_FTS during Gen2 or Gen3 link training.
- */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~FAST_TRAINING_SEQ_MASK;
- val |= FAST_TRAINING_SEQ(180);
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-}
-
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
@@ -352,29 +315,19 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-}
-
static int artpec6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
- dw_pcie_setup_rc(pp);
- artpec6_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- artpec6_pcie_enable_interrupts(artpec6_pcie);
return 0;
}
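
Instead of writing the ACK frequency and speed-control registers by hand, the driver fills dw_pcie's n_fts[] and lets the core program the values; per dw_pcie_setup() in pcie-designware.c, index 0 feeds the pre-Gen3 N_FTS fields and index 1 the Gen3-and-above field. The driver side reduces to:

	/* ARTPEC-7 wants 180 fast training sequences at every speed */
	if (artpec6_pcie->variant == ARTPEC7) {
		pci->n_fts[0] = 180;	/* applied below Gen3 */
		pci->n_fts[1] = 180;	/* applied at Gen3 and above */
	}
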
@@ -383,33 +336,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
-static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get MSI irq\n");
- return pp->msi_irq;
- }
- }
-
- pp->ops = &artpec6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -420,7 +346,6 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
@@ -449,51 +374,17 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.raise_irq = artpec6_pcie_raise_irq,
};
-static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = artpec6_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct artpec6_pcie *artpec6_pcie;
- struct resource *dbi_base;
- struct resource *phy_base;
int ret;
const struct of_device_id *match;
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
+ u32 val;
match = of_match_device(artpec6_pcie_of_match, dev);
if (!match)
@@ -518,13 +409,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
artpec6_pcie->variant = variant;
artpec6_pcie->mode = mode;
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
+ artpec6_pcie->phy_base =
+ devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
@@ -541,24 +427,23 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
return -ENODEV;
- ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
break;
- case DW_PCIE_EP_TYPE: {
- u32 val;
-
+ case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_DEVICE_TYPE_MASK;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
- if (ret < 0)
- return ret;
- break;
- }
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ return dw_pcie_ep_init(&pci->ep);
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 3dd2e2697294..0eda8236c125 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Synopsys DesignWare PCIe Endpoint controller driver
*
* Copyright (C) 2017 Texas Instruments
@@ -7,24 +7,63 @@
*/
#include <linux/of.h>
+#include <linux/platform_device.h>
#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
pci_epc_linkup(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
+
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie_ep_func *ep_func;
+
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
+}
+
+static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
+{
+ unsigned int func_offset = 0;
+
+ if (ep->ops->func_conf_select)
+ func_offset = ep->ops->func_conf_select(ep, func_no);
+
+ return func_offset;
+}
-static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
- int flags)
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
{
u32 reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
@@ -37,49 +76,99 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
- __dw_pcie_ep_reset_bar(pci, bar, 0);
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
+
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 next_cap_ptr;
+ u16 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
- dma_addr_t cpu_addr,
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_barno bar, dma_addr_t cpu_addr,
enum dw_pcie_as_type as_type)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
- if (free_win >= ep->num_ib_windows) {
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
as_type);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
@@ -92,20 +181,21 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
- if (free_win >= ep->num_ob_windows) {
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
- dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -113,7 +203,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -121,13 +211,14 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
enum pci_barno bar = epf_bar->barno;
u32 atu_index = ep->bar_to_atu[bar];
- __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -137,14 +228,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
size_t size = epf_bar->size;
int flags = epf_bar->flags;
enum dw_pcie_as_type as_type;
- u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ u32 reg;
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
as_type = DW_PCIE_AS_MEM;
else
as_type = DW_PCIE_AS_IO;
- ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
+ epf_bar->phys_addr, as_type);
if (ret)
return ret;
@@ -158,6 +255,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_writel_dbi(pci, reg + 4, 0);
}
+ ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -167,8 +265,9 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < ep->num_ob_windows; index++) {
+ for (index = 0; index < pci->num_ob_windows; index++) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -178,7 +277,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -194,15 +293,14 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -211,16 +309,21 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
@@ -230,16 +333,22 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
@@ -250,16 +359,21 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
@@ -269,27 +383,43 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
- dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -305,10 +435,8 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->stop_link)
- return;
-
- pci->ops->stop_link(pci);
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -316,14 +444,14 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->start_link)
+ if (!pci->ops || !pci->ops->start_link)
return -EINVAL;
return pci->ops->start_link(pci);
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -358,108 +486,124 @@ int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
unsigned int aligned_offset;
+ unsigned int func_offset = 0;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
- reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep->msi_cap + PCI_MSI_DATA_64;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
- reg = ep->msi_cap + PCI_MSI_DATA_32;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
+
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
return 0;
}
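
dw_pcie_ep_raise_msix_irq_doorbell() is intended for integrations that expose the MSI-X doorbell register; a glue driver's raise_irq hook would dispatch to it instead of the table-walking dw_pcie_ep_raise_msix_irq(). A sketch of such a dispatcher (hypothetical demo_ prefix, otherwise the usual DWC shape):

/* Sketch: raise_irq hook routing MSI-X through the doorbell path. */
static int demo_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
			     enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_EPC_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
							  interrupt_num);
	default:
		dev_err(pci->dev, "Unknown IRQ type %d\n", type);
		return -EINVAL;
	}
}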
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
- u16 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- u16 tbl_offset, bir;
- u32 bar_addr_upper, bar_addr_lower;
- u32 msg_addr_upper, msg_addr_lower;
+ unsigned int func_offset = 0;
u32 reg, msg_data, vec_ctrl;
- u64 tbl_addr, msg_addr, reg_u64;
- void __iomem *msix_tbl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
int ret;
+ u8 bir;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_TABLE;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- reg = PCI_BASE_ADDRESS_0 + (4 * bir);
- bar_addr_upper = 0;
- bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
- reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
- if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-
- tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
- tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
- tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-
- msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
- PCI_MSIX_ENTRY_SIZE);
- if (!msix_tbl)
- return -EINVAL;
-
- msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
- msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
- vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
-
- iounmap(msix_tbl);
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
}
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem);
+ writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -469,7 +613,7 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
struct pci_epc *epc = ep->epc;
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->page_size);
+ epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
@@ -492,65 +636,110 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ }
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
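
dw_pcie_ep_init_complete() pairs with dw_pcie_ep_init_notify() to support controllers whose DBI registers are unusable until some external event, e.g. a host-supplied reference clock. A sketch under that assumption, with an invented interrupt handler name; it presumes dw_pcie_ep_init() ran earlier with the core_init_notifier feature set, so the DBI-touching half was skipped:

/* Sketch: deferred EP core init driven by a hypothetical "refclk
 * available" interrupt from the SoC. */
static irqreturn_t demo_refclk_handler(int irq, void *data)
{
	struct dw_pcie_ep *ep = data;

	if (dw_pcie_ep_init_complete(ep))
		return IRQ_HANDLED;	/* core still not ready */

	/* Let pci-epf drivers program headers, BARs, MSI caps, ... */
	dw_pcie_ep_init_notify(ep);

	return IRQ_HANDLED;
}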
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
int ret;
- u32 reg;
void *addr;
- u8 hdr_type;
- unsigned int nbars;
- unsigned int offset;
+ u8 func_no;
+ struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
+ struct dw_pcie_ep_func *ep_func;
- if (!pci->dbi_base || !pci->dbi_base2) {
- dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
- return -EINVAL;
- }
+ INIT_LIST_HEAD(&ep->func_list);
- ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ib-windows* property\n");
- return ret;
- }
- if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "Invalid *num-ib-windows*\n");
- return -EINVAL;
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
}
- ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ob-windows* property\n");
- return ret;
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (!res) {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ } else {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ }
}
- if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "Invalid *num-ob-windows*\n");
+
+ dw_pcie_iatu_detect(pci);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
return -EINVAL;
- }
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ib_windows),
+ BITS_TO_LONGS(pci->num_ib_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ob_windows),
+ BITS_TO_LONGS(pci->num_ob_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
- addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
GFP_KERNEL);
if (!addr)
return -ENOMEM;
ep->outbound_addr = addr;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -560,50 +749,47 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
- if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
- dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
- hdr_type);
- return -EIO;
- }
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
- ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
- ep->page_size);
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (!ep->msi_mem) {
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
-
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
-
- dw_pcie_dbi_ro_wr_en(pci);
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
}
- dw_pcie_setup(pci);
-
- return 0;
+ return dw_pcie_ep_init_complete(ep);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 395feb8ca051..f4755f3a03be 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -3,7 +3,7 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <[email protected]>
*/
@@ -20,30 +20,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
-
-static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->rd_own_conf)
- return pp->ops->rd_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
-static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->wr_own_conf)
- return pp->ops->wr_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops dw_child_pcie_ops;
static void dw_msi_ack_irq(struct irq_data *d)
{
@@ -78,17 +55,17 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- int i, pos, irq;
+ int i, pos;
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &status);
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
if (!status)
continue;
@@ -97,10 +74,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pos = 0;
while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (i * MAX_MSI_IRQS_PER_CTRL) +
- pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
pos++;
}
}
@@ -148,6 +124,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -158,8 +135,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -167,6 +143,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -177,8 +154,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -186,13 +162,14 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -236,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct pcie_port *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -264,6 +241,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return -ENOMEM;
}
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
@@ -276,44 +255,35 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct pcie_port *pp)
{
- if (pp->msi_irq) {
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
- }
+ if (pp->msi_irq)
+ irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
- if (pp->msi_page)
- __free_page(pp->msi_page);
+ if (pp->msi_data) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+
+ dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ }
}
-void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- u64 msi_target;
+ u64 msi_target = (u64)pp->msi_data;
- pp->msi_page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
- }
- msi_target = (u64)pp->msi_data;
/* Program the msi_data */
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- lower_32_bits(msi_target));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
- upper_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -322,184 +292,133 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
- u32 hdr_type;
int ret;
raw_spin_lock_init(&pci->pp.lock);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) >> 1;
- pp->cfg1_size = resource_size(cfg_res) >> 1;
+ pp->cfg0_size = resource_size(cfg_res);
pp->cfg0_base = cfg_res->start;
- pp->cfg1_base = cfg_res->start + pp->cfg0_size;
- } else if (!pp->va_cfg0_base) {
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+ } else {
dev_err(dev, "Missing *config* reg space\n");
+ return -ENODEV;
+ }
+
+ if (!pci->dbi_base) {
+ struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
}
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
- ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
- &bridge->dma_ranges, NULL);
- if (ret)
- return ret;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &bridge->windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- pp->io_base = pci_pio_to_address(pp->io->start);
- break;
- case IORESOURCE_MEM:
- pp->mem = win->res;
- pp->mem->name = "MEM";
- pp->mem_size = resource_size(pp->mem);
- pp->mem_bus_addr = pp->mem->start - win->offset;
- break;
- case 0:
- pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) >> 1;
- pp->cfg1_size = resource_size(pp->cfg) >> 1;
- pp->cfg0_base = pp->cfg->start;
- pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
- break;
- case IORESOURCE_BUS:
- pp->busn = win->res;
- break;
- }
- }
+ pp->bridge = bridge;
- if (!pci->dbi_base) {
- pci->dbi_base = devm_pci_remap_cfgspace(dev,
- pp->cfg->start,
- resource_size(pp->cfg));
- if (!pci->dbi_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
+ /* Get the I/O range from DT */
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
}
- pp->mem_base = pp->mem->start;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
- if (!pp->va_cfg0_base) {
- pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
- pp->cfg0_base, pp->cfg0_size);
- if (!pp->va_cfg0_base) {
- dev_err(dev, "Error with ioremap in function\n");
- return -ENOMEM;
- }
- }
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
- if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
- pp->cfg1_base,
- pp->cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
+ if (pp->ops->host_init) {
+ ret = pp->ops->host_init(pp);
+ if (ret)
+ return ret;
}
- ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
- if (ret)
- pci->num_viewport = 2;
-
if (pci_msi_enabled()) {
- /*
- * If a specific SoC driver needs to change the
- * default number of vectors, it needs to implement
- * the set_num_vectors callback.
- */
- if (!pp->ops->set_num_vectors) {
+ pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ of_property_read_bool(np, "msi-parent") ||
+ of_property_read_bool(np, "msi-map"));
+
+ if (!pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
- } else {
- pp->ops->set_num_vectors(pp);
-
- if (pp->num_vectors > MAX_MSI_IRQS ||
- pp->num_vectors == 0) {
- dev_err(dev,
- "Invalid number of vectors\n");
- return -EINVAL;
- }
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ return -EINVAL;
}
- if (!pp->ops->msi_host_init) {
+ if (pp->ops->msi_host_init) {
+ ret = pp->ops->msi_host_init(pp);
+ if (ret < 0)
+ return ret;
+ } else if (pp->has_msi_ctrl) {
+ if (!pp->msi_irq) {
+ pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+ }
+
pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
ret = dw_pcie_allocate_domains(pp);
if (ret)
return ret;
- if (pp->msi_irq)
+ if (pp->msi_irq > 0)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
- } else {
- ret = pp->ops->msi_host_init(pp);
- if (ret < 0)
- return ret;
+
+ ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
+ sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ pp->msi_data = 0;
+ goto err_free_msi;
+ }
}
}
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
+ dw_pcie_iatu_detect(pci);
+
+ dw_pcie_setup_rc(pp);
+
+ if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
+ ret = pci->ops->start_link(pci);
if (ret)
goto err_free_msi;
}
- ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
- if (ret != PCIBIOS_SUCCESSFUL) {
- dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
- ret);
- ret = pcibios_err_to_errno(ret);
- goto err_free_msi;
- }
- if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- dev_err(pci->dev,
- "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
- hdr_type);
- ret = -EIO;
- goto err_free_msi;
- }
-
- pp->root_bus_nr = pp->busn->start;
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
- bridge->dev.parent = dev;
bridge->sysdata = pp;
- bridge->busnr = pp->root_bus_nr;
- bridge->ops = &dw_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret)
- goto err_free_msi;
-
- pp->root_bus = bridge->bus;
-
- if (pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
- pci_bus_size_bridges(pp->root_bus);
- pci_bus_assign_resources(pp->root_bus);
-
- list_for_each_entry(child, &pp->root_bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(pp->root_bus);
- return 0;
+ ret = pci_host_probe(bridge);
+ if (!ret)
+ return 0;
err_free_msi:
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
return ret;
}
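
With config-space mapping, MSI setup, iATU detection, setup_rc(), link start and root-bus probing all pulled into dw_pcie_host_init() above, a glue driver's host_init hook is left with only SoC-specific work. A sketch, with hypothetical demo_* helpers:

/* Sketch of a post-consolidation host_init hook (demo_* hypothetical). */
static int demo_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct demo_pcie *demo = dev_get_drvdata(pci->dev);
	int ret;

	ret = demo_pcie_enable_phy(demo);	/* hypothetical PHY bring-up */
	if (ret)
		return ret;

	demo_pcie_enable_interrupts(demo);	/* hypothetical IRQ unmask */

	return 0;
}

static const struct dw_pcie_host_ops demo_host_ops = {
	.host_init = demo_pcie_host_init,
};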
@@ -507,131 +426,105 @@ EXPORT_SYMBOL_GPL(dw_pcie_host_init);
void dw_pcie_host_deinit(struct pcie_port *pp)
{
- pci_stop_root_bus(pp->root_bus);
- pci_remove_root_bus(pp->root_bus);
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
-static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val,
- bool write)
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
+ int type;
+ u32 busdev;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number == pp->root_bus_nr) {
+ if (pci_is_root_bus(bus->parent))
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- if (write)
- ret = dw_pcie_write(va_cfg_base + where, size, *val);
else
- ret = dw_pcie_read(va_cfg_base + where, size, val);
-
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
-
- return ret;
-}
+ type = PCIE_ATU_TYPE_CFG1;
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
-{
- if (pp->ops->rd_other_conf)
- return pp->ops->rd_other_conf(pp, bus, devfn, where,
- size, val);
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
- false);
-}
+ dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
-static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 val)
-{
- if (pp->ops->wr_other_conf)
- return pp->ops->wr_other_conf(pp, bus, devfn, where,
- size, val);
-
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
- true);
+ return pp->va_cfg0_base + where;
}
-static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
- int dev)
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
+ int ret;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- /* If there is no link, then there is no device */
- if (bus->number != pp->root_bus_nr) {
- if (!dw_pcie_link_up(pci))
- return 0;
- }
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
- /* Access only one slot on each root port */
- if (bus->number == pp->root_bus_nr && dev > 0)
- return 0;
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return 1;
+ return ret;
}
-static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
+ int ret;
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
- if (bus->number == pp->root_bus_nr)
- return dw_pcie_rd_own_conf(pp, where, size, val);
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+ return ret;
}
-static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
+
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == pp->root_bus_nr)
- return dw_pcie_wr_own_conf(pp, where, size, val);
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
- return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+ return pci->dbi_base + where;
}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
static struct pci_ops dw_pcie_ops = {
- .read = dw_pcie_rd_conf,
- .write = dw_pcie_wr_conf,
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
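
The switch to pci_generic_config_read()/write() works because those helpers delegate all addressing to the map_bus hook and fail the access cleanly when it returns NULL. Roughly what the read side does (a simplified sketch; the authoritative code lives in drivers/pci/access.c):

/* Simplified sketch of the generic accessor consumed via .map_bus. */
static int sketch_generic_config_read(struct pci_bus *bus, unsigned int devfn,
				      int where, int size, u32 *val)
{
	void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

	if (!addr) {
		*val = ~0;			/* all-ones, device absent */
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}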
void dw_pcie_setup_rc(struct pcie_port *pp)
{
+ int i;
u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -643,21 +536,23 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- if (!pp->ops->msi_host_init) {
+ if (pp->has_msi_ctrl) {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ ~0);
}
}
+ dw_pcie_msi_init(pp);
+
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
@@ -681,29 +576,55 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ /* Ensure all outbound windows are disabled so there are no stale matches */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
+
/*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf) {
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
- if (pci->num_viewport > 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ int atu_idx = 0;
+ struct resource_entry *entry;
+
+ /* Get last memory resource entry */
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++atu_idx)
+ break;
+
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_MEM, entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++atu_idx)
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+ else
+ pci->io_cfg_atu_shared = true;
+ }
+
+ if (pci->num_ob_windows <= atu_idx)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ pci->num_ob_windows);
}
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
/* Program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 73646b677aff..8851eb161a0e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -33,27 +33,7 @@ struct dw_plat_pcie_of_data {
static const struct of_device_id dw_plat_pcie_of_match[];
-static int dw_plat_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- dw_pcie_setup_rc(pp);
- dw_pcie_wait_for_link(pci);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
- return 0;
-}
-
-static void dw_plat_set_num_vectors(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
- .host_init = dw_plat_pcie_host_init,
- .set_num_vectors = dw_plat_set_num_vectors,
};
static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
@@ -124,12 +104,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
if (pp->irq < 0)
return pp->irq;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
+ pp->num_vectors = MAX_MSI_IRQS;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -141,44 +116,11 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return 0;
}
-static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = dw_plat_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "Failed to initialize endpoint\n");
- return ret;
- }
- return 0;
-}
-
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
- struct resource *res; /* Resource from DT */
int ret;
const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
@@ -205,14 +147,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pci->dbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, dw_plat_pcie);
switch (dw_plat_pcie->mode) {
@@ -228,10 +162,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
- ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
- break;
+ pci->ep.ops = &pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 820488dfeaed..850b4533f4ef 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -3,15 +3,17 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <[email protected]>
*/
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
@@ -139,7 +141,7 @@ u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
int ret;
u32 val;
- if (pci->ops->read_dbi)
+ if (pci->ops && pci->ops->read_dbi)
return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
@@ -154,7 +156,7 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
- if (pci->ops->write_dbi) {
+ if (pci->ops && pci->ops->write_dbi) {
pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
@@ -165,26 +167,11 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
-{
- int ret;
- u32 val;
-
- if (pci->ops->read_dbi2)
- return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
-
- ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
- if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
-
- return val;
-}
-
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
- if (pci->ops->write_dbi2) {
+ if (pci->ops && pci->ops->write_dbi2) {
pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
return;
}
@@ -194,31 +181,31 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
int ret;
u32 val;
- if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
int ret;
- if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, size, val);
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
@@ -238,24 +225,73 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr,
- u64 pci_addr, u32 size)
+static inline u32 dw_pcie_enable_ecrc(u32 val)
+{
+ /*
+ * DesignWare core version 4.90A has a design issue where the 'TD'
+ * bit in the Control register-1 of the ATU outbound region acts
+ * like an override for the ECRC setting, i.e., the presence of TLP
+ * Digest (ECRC) in the outgoing TLPs is solely determined by this
+ * bit. This is contrary to the PCIe spec which says that the
+ * enablement of the ECRC is solely determined by the AER
+ * registers.
+ *
+ * Because of this, even when the ECRC is enabled through AER
+ * registers, the transactions going through ATU won't have TLP
+ * Digest as there is no way the PCI core AER code could program
+ * the TD bit which is specific to the DesignWare core.
+ *
+ * The best way to handle this scenario is to program the TD bit
+ * always. It affects only the traffic from root port to downstream
+ * devices.
+ *
+ * At this point,
+ * When ECRC is enabled in AER registers, everything works normally
+ * When ECRC is NOT enabled in AER registers, then,
+ * on Root Port:- TLP Digest (DWord size) gets appended to each packet
+ * even though it is not required. Since downstream
+ * TLPs are mostly for configuration accesses and BAR
+ * accesses, they are not in the critical path and won't
+ * have much negative effect on performance.
+ * on End Point:- TLP Digest is received for some/all the packets coming
+ * from the root port. TLP Digest is ignored because,
+ * as per the PCIe Spec r5.0 v1.0 section 2.2.3
+ * "TLP Digest Rules", when an endpoint receives a TLP
+ * Digest while its ECRC check functionality is disabled
+ * in the AER registers, the received TLP Digest is simply ignored.
+ * Since there is no issue or error reported on either side, the best
+ * way to handle the scenario is to program the TD bit by default.
+ */
+
+ return val | PCIE_ATU_TD;
+}
+
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int type,
+ u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
u32 retries, val;
+ u64 limit_addr = cpu_addr + size - 1;
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
+ lower_32_bits(limit_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ if (upper_32_bits(size - 1))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (pci->version == 0x490A)
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
@@ -274,17 +310,18 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
- if (pci->ops->cpu_addr_fixup)
+ if (pci->ops && pci->ops->cpu_addr_fixup)
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
- pci_addr, size);
+ dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
return;
}
@@ -296,11 +333,19 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(cpu_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
lower_32_bits(cpu_addr + size - 1));
+ if (pci->version >= 0x460A)
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(cpu_addr + size - 1));
dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
+ val | PCIE_ATU_INCREASE_REGION_SIZE : val;
+ if (pci->version == 0x490A)
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
/*
@@ -317,6 +362,21 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
+}
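
A minimal usage sketch of the wrappers above, mapping one CPU memory window to PCI bus addresses; the region index and MEM type are illustrative choices:

	/* Map a CPU window to the matching PCI bus addresses via region 0. */
	static void example_map_mem_window(struct dw_pcie *pci, u64 cpu_addr,
					   u64 pci_addr, u64 size)
	{
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					  cpu_addr, pci_addr, size);
	}
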
+
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
@@ -332,8 +392,8 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
- int bar, u64 cpu_addr,
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int bar, u64 cpu_addr,
enum dw_pcie_as_type as_type)
{
int type;
@@ -355,8 +415,10 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EINVAL;
}
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
PCIE_ATU_ENABLE |
PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
@@ -377,14 +439,15 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EBUSY;
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type)
{
int type;
u32 retries, val;
if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+ return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
cpu_addr, as_type);
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
@@ -403,9 +466,11 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
return -EINVAL;
}
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
- | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
@@ -440,7 +505,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
}
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
@@ -466,13 +531,60 @@ int dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
- if (pci->ops->link_up)
+ if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
+
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+ val |= PORT_MLTI_UPCFG_SUPPORT;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
+
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+{
+ u32 cap, ctrl2, link_speed;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
+
+ switch (pcie_link_speed[link_gen]) {
+ case PCIE_SPEED_2_5GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
+ break;
+ case PCIE_SPEED_5_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ case PCIE_SPEED_8_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
+ break;
+ case PCIE_SPEED_16_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
+ break;
+ default:
+ /* Use hardware capability */
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
+ break;
+ }
+
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
+
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
+}
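
A minimal sketch of how a platform driver might populate link_gen before dw_pcie_setup() runs, assuming the standard "max-link-speed" DT property:

	/* Illustrative: dw_pcie_setup() applies the cap when link_gen > 0. */
	static void example_init_link_gen(struct dw_pcie *pci)
	{
		pci->link_gen = of_pci_get_max_link_speed(pci->dev->of_node);
	}
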
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
@@ -485,34 +597,144 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
return 0;
}
-void dw_pcie_setup(struct dw_pcie *pci)
+static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
{
- int ret;
+ int max_region, i, ob = 0, ib = 0;
u32 val;
- u32 lanes;
+
+ max_region = min((int)pci->atu_size / 512, 256);
+
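+	/*
+	 * Probe each region: write a test pattern into the region's lower
+	 * target register and read it back. A region that is not
+	 * implemented won't return the pattern, so the first mismatch
+	 * bounds the number of usable windows.
+	 */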
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+{
+ int max_region, i, ob = 0, ib = 0;
+ u32 val;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
+{
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
dw_pcie_iatu_unroll_enabled(pci))) {
pci->iatu_unroll_enabled = true;
- if (!pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
- dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ if (!pci->atu_base) {
+ struct resource *res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res)
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->atu_base))
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+
+ if (!pci->atu_size)
+ /* Pick a minimal default, enough for 8 in and 8 out windows */
+ pci->atu_size = SZ_4K;
+
+ dw_pcie_iatu_detect_regions_unroll(pci);
+ } else
+ dw_pcie_iatu_detect_regions(pci);
+
+ dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
+ dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
+ pci->num_ob_windows, pci->num_ib_windows);
+}
- ret = of_property_read_u32(np, "num-lanes", &lanes);
- if (ret) {
- dev_dbg(pci->dev, "property num-lanes isn't found\n");
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ if (pci->link_gen > 0)
+ dw_pcie_link_set_max_speed(pci, pci->link_gen);
+
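+	/*
+	 * N_FTS advertises how many Fast Training Sequence ordered sets
+	 * this port needs from its link partner when exiting L0s;
+	 * platform drivers can override the core defaults via
+	 * pci->n_fts[].
+	 */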
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[pci->link_gen - 1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+ if (!pci->num_lanes) {
+ dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
}
/* Set the number of lanes */
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
val &= ~PORT_LINK_MODE_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LINK_MODE_1_LANES;
break;
@@ -526,7 +748,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_MODE_8_LANES;
break;
default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
return;
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
@@ -534,7 +756,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
/* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5accdd6bc388..7d6e9b7576be 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -3,7 +3,7 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <[email protected]>
*/
@@ -30,7 +30,20 @@
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR 0x70C
+#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
+#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
+
#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
@@ -46,6 +59,7 @@
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
@@ -60,20 +74,24 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND BIT(31)
#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_REGION_INDEX2 0x2
-#define PCIE_ATU_REGION_INDEX1 0x1
-#define PCIE_ATU_REGION_INDEX0 0x0
#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TD BIT(8)
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
@@ -82,10 +100,14 @@
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
#define PCIE_ATU_UPPER_TARGET 0x91C
+#define PCIE_ATU_UPPER_LIMIT 0x924
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@@ -103,9 +125,10 @@
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
-#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
* The default address offset between dbi_base and atu_base. Root controller
@@ -150,47 +173,29 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
- int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
- int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
- int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*scan_bus)(struct pcie_port *pp);
- void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
};
struct pcie_port {
- u8 root_bus_nr;
+ bool has_msi_ctrl:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u32 cfg1_size;
resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
- u64 mem_base;
- phys_addr_t mem_bus_addr;
- u32 mem_size;
- struct resource *cfg;
- struct resource *io;
- struct resource *mem;
- struct resource *busn;
int irq;
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
+ u16 msi_msg;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
- struct pci_bus *root_bus;
+ struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -206,10 +211,26 @@ struct dw_pcie_ep_ops {
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+ /*
+	 * Provide a method to implement function-specific config space
+	 * access for platforms where this differs: if each function's
+	 * config space lives at a different offset, return that
+	 * function's offset; if the function is instead selected by
+	 * writing a register, return 0 and implement the selection in
+	 * the platform driver's callback.
+ */
+ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+};
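
A hypothetical func_conf_select implementation for a platform whose per-function config spaces sit at a fixed stride (the 4K stride is illustrative):

	static unsigned int example_func_conf_select(struct dw_pcie_ep *ep,
						     u8 func_no)
	{
		return func_no * SZ_4K;	/* stride is platform specific */
	}
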
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ep {
struct pci_epc *epc;
+ struct list_head func_list;
const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
@@ -218,12 +239,9 @@ struct dw_pcie_ep {
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
- u32 num_ib_windows;
- u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
- u8 msi_cap; /* MSI capability offset */
- u8 msix_cap; /* MSI-X capability offset */
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
struct dw_pcie_ops {
@@ -232,8 +250,6 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
- size_t size);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
@@ -247,12 +263,18 @@ struct dw_pcie {
void __iomem *dbi_base2;
/* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
- u32 num_viewport;
- u8 iatu_unroll_enabled;
+ size_t atu_size;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
unsigned int version;
+ int num_lanes;
+ int link_gen;
+ u8 n_fts[2];
+ bool iatu_unroll_enabled: 1;
+ bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -268,20 +290,23 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
- u32 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type);
+ u64 size);
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type);
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -318,21 +343,6 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
-static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_dbi2(pci, reg, 0x4);
-}
-
-static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
-{
- dw_pcie_write_atu(pci, reg, 0x4, val);
-}
-
-static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_atu(pci, reg, 0x4);
-}
-
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -357,26 +367,18 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_msi_init(struct pcie_port *pp);
-void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_host_deinit(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_msi_init(struct pcie_port *pp)
-{
-}
-
-static inline void dw_pcie_free_msi(struct pcie_port *pp)
-{
-}
-
static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
@@ -394,18 +396,30 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
@@ -416,6 +430,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
@@ -437,8 +460,21 @@ static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
#endif
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000000..c9b341e55cbb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of the PCIE_CLIENT control registers are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
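+
+/*
+ * Example: HIWORD_UPDATE(0x000c, 0x0004) == 0x000c0004: bits 3:2 are
+ * write-enabled, so bit 2 is set and bit 3 is cleared while all other
+ * register bits are left unchanged by the hardware.
+ */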
+
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+ /*
+	 * PCIe requires the refclk to be stable for 100µs prior to releasing
+	 * PERST#. See table 2-4 in section 2.6.2 AC Specifications of the
+	 * PCI Express Card Electromechanical Specification, 1.1. However, we
+	 * don't know whether the refclk comes from the RC's PHY or from an
+	 * external oscillator. If it comes from the RC, enabling the LTSSM
+	 * is the right place to release PERST#. Allow extra time beyond the
+	 * bare 100µs, since we don't know how long the device needs to reset.
+ */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+ phy_exit(rockchip->phy);
+ phy_power_off(rockchip->phy);
+}
+
+static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return reset_control_deassert(rockchip->rst);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct pcie_port *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+	/* DON'T MOVE ME: must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = rockchip_pcie_reset_control_release(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 000000000000..00cde9a248b5
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
+
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+ return 0;
+}
+
+static int fu740_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+ ret = reset_control_deassert(afp->rst);
+ if (ret) {
+ dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+ return ret;
+ }
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .host_init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+	/* Bring down the link so the bootloader gets a clean state on reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 6d9e1b2b8f7b..8fc5960faf28 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -10,15 +10,10 @@
*/
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
-#include <linux/regmap.h>
#include "../../pci.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -104,8 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_ops = {
- .bus_shift = 20,
+const struct pci_ecam_ops hisi_pcie_ops = {
.init = hisi_pcie_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -118,229 +112,6 @@ struct pci_ecam_ops hisi_pcie_ops = {
#ifdef CONFIG_PCI_HISI
-#include "pcie-designware.h"
-
-#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
-#define PCIE_HIP06_CTRL_OFF 0x1000
-#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
-#define PCIE_LTSSM_LINKUP_STATE 0x11
-#define PCIE_LTSSM_STATE_MASK 0x3F
-
-#define to_hisi_pcie(x) dev_get_drvdata((x)->dev)
-
-struct hisi_pcie;
-
-struct pcie_soc_ops {
- int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
-};
-
-struct hisi_pcie {
- struct dw_pcie *pci;
- struct regmap *subctrl;
- u32 port_id;
- const struct pcie_soc_ops *soc_ops;
-};
-
-/* HipXX PCIe host only supports 32-bit config access */
-static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- u32 reg;
- u32 reg_val;
- void *walker = &reg_val;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- walker += (where & 0x3);
- reg = where & ~0x3;
- reg_val = dw_pcie_readl_dbi(pci, reg);
-
- if (size == 1)
- *val = *(u8 __force *) walker;
- else if (size == 2)
- *val = *(u16 __force *) walker;
- else if (size == 4)
- *val = reg_val;
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/* HipXX PCIe host only supports 32-bit config access */
-static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- u32 reg_val;
- u32 reg;
- void *walker = &reg_val;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- walker += (where & 0x3);
- reg = where & ~0x3;
- if (size == 4)
- dw_pcie_writel_dbi(pci, reg, val);
- else if (size == 2) {
- reg_val = dw_pcie_readl_dbi(pci, reg);
- *(u16 __force *) walker = val;
- dw_pcie_writel_dbi(pci, reg, reg_val);
- } else if (size == 1) {
- reg_val = dw_pcie_readl_dbi(pci, reg);
- *(u8 __force *) walker = val;
- dw_pcie_writel_dbi(pci, reg, reg_val);
- } else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
-{
- u32 val;
-
- regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
- 0x100 * hisi_pcie->port_id, &val);
-
- return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
-}
-
-static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
-{
- struct dw_pcie *pci = hisi_pcie->pci;
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4);
-
- return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
-}
-
-static int hisi_pcie_link_up(struct dw_pcie *pci)
-{
- struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci);
-
- return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
-}
-
-static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
- .rd_own_conf = hisi_pcie_cfg_read,
- .wr_own_conf = hisi_pcie_cfg_write,
-};
-
-static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = hisi_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
- u32 port_id;
-
- if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
- dev_err(dev, "failed to read port-id\n");
- return -EINVAL;
- }
- if (port_id > 3) {
- dev_err(dev, "Invalid port-id: %d\n", port_id);
- return -EINVAL;
- }
- hisi_pcie->port_id = port_id;
-
- pp->ops = &hisi_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = hisi_pcie_link_up,
-};
-
-static int hisi_pcie_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
- struct hisi_pcie *hisi_pcie;
- struct resource *reg;
- int ret;
-
- hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
- if (!hisi_pcie)
- return -ENOMEM;
-
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- hisi_pcie->pci = pci;
-
- hisi_pcie->soc_ops = of_device_get_match_data(dev);
-
- hisi_pcie->subctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
- if (IS_ERR(hisi_pcie->subctrl)) {
- dev_err(dev, "cannot get subctrl base\n");
- return PTR_ERR(hisi_pcie->subctrl);
- }
-
- reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- platform_set_drvdata(pdev, hisi_pcie);
-
- ret = hisi_add_pcie_port(hisi_pcie, pdev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static struct pcie_soc_ops hip05_ops = {
- &hisi_pcie_link_up_hip05
-};
-
-static struct pcie_soc_ops hip06_ops = {
- &hisi_pcie_link_up_hip06
-};
-
-static const struct of_device_id hisi_pcie_of_match[] = {
- {
- .compatible = "hisilicon,hip05-pcie",
- .data = (void *) &hip05_ops,
- },
- {
- .compatible = "hisilicon,hip06-pcie",
- .data = (void *) &hip06_ops,
- },
- {},
-};
-
-static struct platform_driver hisi_pcie_driver = {
- .probe = hisi_pcie_probe,
- .driver = {
- .name = "hisi-pcie",
- .of_match_table = hisi_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-builtin_platform_driver(hisi_pcie_driver);
-
-static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct pci_ecam_ops *ops;
-
- ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
- return pci_host_common_probe(pdev, ops);
-}
-
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -362,8 +133,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_platform_ops = {
- .bus_shift = 20,
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
.init = hisi_pcie_platform_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -375,17 +145,17 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
{
.compatible = "hisilicon,hip06-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{
.compatible = "hisilicon,hip07-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{},
};
static struct platform_driver hisi_pcie_almost_ecam_driver = {
- .probe = hisi_pcie_almost_ecam_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 811b5c6d62ea..86f9d16c50d7 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -122,32 +122,37 @@ static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
histb_pcie_dbi_w_mode(&pci->pp, false);
}
-static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
- int size, u32 *val)
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_r_mode(pp, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- histb_pcie_dbi_r_mode(pp, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_w_mode(pp, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- histb_pcie_dbi_w_mode(pp, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
static int histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -164,47 +169,37 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_establish_link(struct pcie_port *pp)
+static int histb_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link already up\n");
- return 0;
- }
-
- /* PCIe RC work mode */
- regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
- regval &= ~PCIE_DEVICE_TYPE_MASK;
- regval |= PCIE_WM_RC;
- histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
-
- /* setup root complex */
- dw_pcie_setup_rc(pp);
-
/* assert LTSSM enable */
regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
regval |= PCIE_APP_LTSSM_ENABLE;
histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static int histb_pcie_host_init(struct pcie_port *pp)
{
- histb_pcie_establish_link(pp);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ pp->bridge->ops = &histb_pci_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
return 0;
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .rd_own_conf = histb_pcie_rd_own_conf,
- .wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,
};
@@ -297,6 +292,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = histb_pcie_read_dbi,
.write_dbi = histb_pcie_write_dbi,
.link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
};
static int histb_pcie_probe(struct platform_device *pdev)
@@ -304,7 +300,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
struct histb_pcie *hipcie;
struct dw_pcie *pci;
struct pcie_port *pp;
- struct resource *res;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum of_gpio_flags of_flags;
@@ -324,15 +319,13 @@ static int histb_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
- hipcie->ctrl = devm_ioremap_resource(dev, res);
+ hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control");
if (IS_ERR(hipcie->ctrl)) {
dev_err(dev, "cannot get control reg base\n");
return PTR_ERR(hipcie->ctrl);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi");
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "cannot get rc-dbi base\n");
return PTR_ERR(pci->dbi_base);
@@ -400,14 +393,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
return PTR_ERR(hipcie->bus_reset);
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- dev_err(dev, "Failed to get MSI IRQ\n");
- return pp->msi_irq;
- }
- }
-
hipcie->phy = devm_phy_get(dev, "phy");
if (IS_ERR(hipcie->phy)) {
dev_info(dev, "no pcie-phy found\n");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 000000000000..d15cf35fa7f2
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3 180
+#define PORT_AFR_N_FTS_GEN4 196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR 0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE BIT(0)
+
+#define PCIE_APP_MSG_CR 0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0)
+
+#define PCIE_APP_PMC 0x44
+#define PCIE_APP_PMC_IN_L2 BIT(20)
+
+#define PCIE_APP_IRNEN 0xF4
+#define PCIE_APP_IRNCR 0xF8
+#define PCIE_APP_IRN_AER_REPORT BIT(0)
+#define PCIE_APP_IRN_PME BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
+#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_INTA BIT(13)
+#define PCIE_APP_IRN_INTB BIT(14)
+#define PCIE_APP_IRN_INTC BIT(15)
+#define PCIE_APP_IRN_INTD BIT(16)
+#define PCIE_APP_IRN_MSG_LTR BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
+#define PCIE_APP_INTX_OFST 12
+
+#define PCIE_APP_IRN_INT \
+ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
+ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
+
+#define BUS_IATU_OFFSET SZ_256M
+#define RESET_INTERVAL_MS 100
+
+struct intel_pcie_soc {
+ unsigned int pcie_ver;
+};
+
+struct intel_pcie_port {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct gpio_desc *reset_gpio;
+ u32 rst_intrvl;
+ struct clk *core_clk;
+ struct reset_control *core_rst;
+ struct phy *phy;
+};
+
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+ u32 old;
+
+ old = readl(base + ofs);
+ val = (old & ~mask) | (val & mask);
+
+ if (val != old)
+ writel(val, base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+ writel(val, lpp->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(lpp->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
+{
+ return dw_pcie_readl_dbi(&lpp->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+ dw_pcie_writel_dbi(&lpp->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
+{
+ u32 val;
+ u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
+
+ val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
+
+ val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+ pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
+{
+ switch (pci->link_gen) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
+ break;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
+ break;
+ default:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
+ break;
+ }
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
+{
+ struct device *dev = lpp->pci.dev;
+ int ret;
+
+ lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lpp->reset_gpio)) {
+ ret = PTR_ERR(lpp->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Make initial reset last for 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
+{
+ reset_control_assert(lpp->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
+{
+ /*
+	 * A one-microsecond delay to make sure the reset pulse is
+	 * wide enough so that the core reset is clean.
+ */
+ udelay(1);
+ reset_control_deassert(lpp->core_rst);
+
+ /*
+	 * On some SoCs the core reset also resets the PHY, so more
+	 * delay is needed to make sure the reset process is done.
+ */
+ usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
+{
+ gpiod_set_value_cansleep(lpp->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
+{
+ msleep(lpp->rst_intrvl);
+ gpiod_set_value_cansleep(lpp->reset_gpio, 0);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+ struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &lpp->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ lpp->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpp->core_clk)) {
+ ret = PTR_ERR(lpp->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clks: %d\n", ret);
+ return ret;
+ }
+
+ lpp->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(lpp->core_rst)) {
+ ret = PTR_ERR(lpp->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get resets: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "reset-assert-ms",
+ &lpp->rst_intrvl);
+ if (ret)
+ lpp->rst_intrvl = RESET_INTERVAL_MS;
+
+ lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(lpp->app_base))
+ return PTR_ERR(lpp->app_base);
+
+ lpp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(lpp->phy)) {
+ ret = PTR_ERR(lpp->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
+{
+ u32 value;
+ int ret;
+ struct dw_pcie *pci = &lpp->pci;
+
+ if (pci->link_gen < 3)
+ return 0;
+
+ /* Send PME_TURN_OFF message */
+ pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+ /* Read PMC status and wait for falling into L2 link state */
+ ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
+ value & PCIE_APP_PMC_IN_L2, 20,
+ jiffies_to_usecs(5 * HZ));
+ if (ret)
+		dev_err(lpp->pci.dev, "Timed out waiting for PCIe link to enter L2\n");
+
+ return ret;
+}
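+
+/*
+ * The helper above implements the standard PCIe power-down handshake:
+ * the RC broadcasts PME_Turn_Off, the downstream device answers with
+ * PME_TO_Ack, and the link settles in L2; the PMC status register is
+ * polled until it reports the L2 indication (or the 5 s timeout hits).
+ */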
+
+static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
+{
+ if (dw_pcie_link_up(&lpp->pci))
+ intel_pcie_wait_l2(lpp);
+
+ /* Put endpoint device in reset state */
+ intel_pcie_device_rst_assert(lpp);
+ pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
+{
+ int ret;
+ struct dw_pcie *pci = &lpp->pci;
+
+ intel_pcie_core_rst_assert(lpp);
+ intel_pcie_device_rst_assert(lpp);
+
+ ret = phy_init(lpp->phy);
+ if (ret)
+ return ret;
+
+ intel_pcie_core_rst_deassert(lpp);
+
+ ret = clk_prepare_enable(lpp->core_clk);
+ if (ret) {
+ dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
+ goto clk_err;
+ }
+
+ pci->atu_base = pci->dbi_base + 0xC0000;
+
+ intel_pcie_ltssm_disable(lpp);
+ intel_pcie_link_setup(lpp);
+ intel_pcie_init_n_fts(pci);
+ dw_pcie_setup_rc(&pci->pp);
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(lpp);
+ intel_pcie_ltssm_enable(lpp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto app_init_err;
+
+ /* Enable integrated interrupts */
+ pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ PCIE_APP_IRN_INT);
+
+ return 0;
+
+app_init_err:
+ clk_disable_unprepare(lpp->core_clk);
+clk_err:
+ intel_pcie_core_rst_assert(lpp);
+ phy_exit(lpp->phy);
+
+ return ret;
+}
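+
+/*
+ * Bring-up order used above: hold both the core and the endpoint in
+ * reset, initialize the PHY, release the core reset, enable the core
+ * clock, configure the RC while the LTSSM is stopped, then release
+ * PERST# and start link training.
+ */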
+
+static void __intel_pcie_remove(struct intel_pcie_port *lpp)
+{
+ intel_pcie_core_irq_disable(lpp);
+ intel_pcie_turn_off(lpp);
+ clk_disable_unprepare(lpp->core_clk);
+ intel_pcie_core_rst_assert(lpp);
+ phy_exit(lpp->phy);
+}
+
+static int intel_pcie_remove(struct platform_device *pdev)
+{
+ struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &lpp->pci.pp;
+
+ dw_pcie_host_deinit(pp);
+ __intel_pcie_remove(lpp);
+
+ return 0;
+}
+
+static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+{
+ struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ int ret;
+
+ intel_pcie_core_irq_disable(lpp);
+ ret = intel_pcie_wait_l2(lpp);
+ if (ret)
+ return ret;
+
+ phy_exit(lpp->phy);
+ clk_disable_unprepare(lpp->core_clk);
+ return ret;
+}
+
+static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+{
+ struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+
+ return intel_pcie_host_setup(lpp);
+}
+
+static int intel_pcie_rc_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);
+
+ return intel_pcie_host_setup(lpp);
+}
+
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ return cpu_addr + BUS_IATU_OFFSET;
+}
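+
+/*
+ * The fixup above adds BUS_IATU_OFFSET (256 MiB) to every CPU address
+ * before it is programmed into the iATU, reflecting the fixed offset
+ * between CPU and PCIe bus addresses on this SoC.
+ */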
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+ .cpu_addr_fixup = intel_pcie_cpu_addr,
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+ .host_init = intel_pcie_rc_init,
+};
+
+static const struct intel_pcie_soc pcie_data = {
+ .pcie_ver = 0x520A,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+ const struct intel_pcie_soc *data;
+ struct device *dev = &pdev->dev;
+ struct intel_pcie_port *lpp;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
+ if (!lpp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lpp);
+ pci = &lpp->pci;
+ pci->dev = dev;
+ pp = &pci->pp;
+
+ ret = intel_pcie_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ ret = intel_pcie_ep_rst_init(lpp);
+ if (ret)
+ return ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ pci->ops = &intel_pcie_ops;
+ pci->version = data->pcie_ver;
+ pp->ops = &intel_pcie_dw_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Cannot initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+ { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ {}
+};
+
+static struct platform_driver intel_pcie_driver = {
+ .probe = intel_pcie_probe,
+ .remove = intel_pcie_remove,
+ .driver = {
+ .name = "intel-gw-pcie",
+ .of_match_table = of_intel_pcie_match,
+ .pm = &intel_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(intel_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000000..1ac29a6eef22
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table-2.4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
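+
+/*
+ * The function cast above lets clk_disable_unprepare(), which takes a
+ * struct clk *, be registered directly as a devm cleanup action, so the
+ * clock is disabled and unprepared automatically on probe failure or
+ * device removal. A rate of 0 skips clk_set_rate(), which keeps the
+ * helper usable for both the master clock and the 24 MHz aux clock.
+ */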
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct pcie_port *pp;
+
+ /*
+	 * The Keem Bay PCIe controller provides additional IP logic on top of
+	 * the standard DWC IP: an MSI IRQ is cleared by writing '1' to the
+	 * respective bit of the status register.
+	 *
+	 * A chained IRQ handler is therefore used to drive this additional
+	 * IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
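+
+/*
+ * BARs 0, 2 and 4 are fixed as 64-bit BARs, which consumes BARs 1, 3
+ * and 5 as their upper halves; the odd BARs are therefore marked
+ * reserved so the EPC core never hands them out independently.
+ */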
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index c19617a912bd..095afbccf9c1 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -3,21 +3,23 @@
* PCIe host controller driver for Kirin Phone SoCs
*
* Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
- * http://www.huawei.com
+ * https://www.huawei.com
*
* Author: Xiaowei Song <[email protected]>
*/
-#include <linux/compiler.h>
#include <linux/clk.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
@@ -28,26 +30,16 @@
#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
-#define REF_CLK_FREQ 100000000
-
/* PCIe ELBI registers */
#define SOC_PCIECTRL_CTRL0_ADDR 0x000
#define SOC_PCIECTRL_CTRL1_ADDR 0x004
-#define SOC_PCIEPHY_CTRL2_ADDR 0x008
-#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
/* info located in APB */
#define PCIE_APP_LTSSM_ENABLE 0x01c
-#define PCIE_APB_PHY_CTRL0 0x0
-#define PCIE_APB_PHY_CTRL1 0x4
#define PCIE_APB_PHY_STATUS0 0x400
#define PCIE_LINKUP_ENABLE (0x8020)
#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
-#define PIPE_CLK_STABLE (0x1 << 19)
-#define PHY_REF_PAD_BIT (0x1 << 8)
-#define PHY_PWR_DOWN_BIT (0x1 << 22)
-#define PHY_RST_ACK_BIT (0x1 << 16)
/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
@@ -60,17 +52,70 @@
#define PCIE_DEBOUNCE_PARAM 0xF0F400
#define PCIE_OE_BYPASS (0x3 << 28)
+/*
+ * Max number of connected PCI slots at an external PCI bridge
+ *
+ * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected
+ * lanes: lane 0 goes upstream, and of the other three lanes, one is
+ * connected to an on-board Ethernet adapter and the other two to M.2 and
+ * mini PCI slots.
+ *
+ * Each slot has a different clock source and uses a separate PERST# pin.
+ */
+#define MAX_PCI_SLOTS 3
+
+enum pcie_kirin_phy_type {
+ PCIE_KIRIN_INTERNAL_PHY,
+ PCIE_KIRIN_EXTERNAL_PHY
+};
+
+struct kirin_pcie {
+ enum pcie_kirin_phy_type type;
+
+ struct dw_pcie *pci;
+ struct regmap *apb;
+ struct phy *phy;
+ void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
+
+ /* DWC PERST# */
+ int gpio_id_dwc_perst;
+
+ /* Per-slot PERST# */
+ int num_slots;
+ int gpio_id_reset[MAX_PCI_SLOTS];
+ const char *reset_names[MAX_PCI_SLOTS];
+
+ /* Per-slot clkreq */
+ int n_gpio_clkreq;
+ int gpio_id_clkreq[MAX_PCI_SLOTS];
+ const char *clkreq_names[MAX_PCI_SLOTS];
+};
+
+/*
+ * Kirin 960 PHY. Can't be split into a PHY driver without changing the
+ * DT schema.
+ */
+
+#define REF_CLK_FREQ 100000000
+
+/* PHY info located in APB */
+#define PCIE_APB_PHY_CTRL0 0x0
+#define PCIE_APB_PHY_CTRL1 0x4
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PIPE_CLK_STABLE BIT(19)
+#define PHY_REF_PAD_BIT BIT(8)
+#define PHY_PWR_DOWN_BIT BIT(22)
+#define PHY_RST_ACK_BIT BIT(16)
+
/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
/* Time for delay */
-#define REF_2_PERST_MIN 20000
+#define REF_2_PERST_MIN 21000
#define REF_2_PERST_MAX 25000
#define PERST_2_ACCESS_MIN 10000
#define PERST_2_ACCESS_MAX 12000
-#define LINK_WAIT_MIN 900
-#define LINK_WAIT_MAX 1000
#define PIPE_CLK_WAIT_MIN 550
#define PIPE_CLK_WAIT_MAX 600
#define TIME_CMOS_MIN 100
@@ -78,128 +123,101 @@
#define TIME_PHY_PD_MIN 10
#define TIME_PHY_PD_MAX 11
-struct kirin_pcie {
- struct dw_pcie *pci;
- void __iomem *apb_base;
- void __iomem *phy_base;
+struct hi3660_pcie_phy {
+ struct device *dev;
+ void __iomem *base;
struct regmap *crgctrl;
struct regmap *sysctrl;
struct clk *apb_sys_clk;
struct clk *apb_phy_clk;
struct clk *phy_ref_clk;
- struct clk *pcie_aclk;
- struct clk *pcie_aux_clk;
- int gpio_id_reset;
+ struct clk *aclk;
+ struct clk *aux_clk;
};
-/* Registers in PCIeCTRL */
-static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
- u32 val, u32 reg)
-{
- writel(val, kirin_pcie->apb_base + reg);
-}
-
-static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
-{
- return readl(kirin_pcie->apb_base + reg);
-}
-
/* Registers in PCIePHY */
-static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy,
u32 val, u32 reg)
{
- writel(val, kirin_pcie->phy_base + reg);
+ writel(val, hi3660_pcie_phy->base + reg);
}
-static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 reg)
{
- return readl(kirin_pcie->phy_base + reg);
+ return readl(hi3660_pcie_phy->base + reg);
}
-static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
- struct platform_device *pdev)
+static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = phy->dev;
- kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
- if (IS_ERR(kirin_pcie->phy_ref_clk))
- return PTR_ERR(kirin_pcie->phy_ref_clk);
+ phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+ if (IS_ERR(phy->phy_ref_clk))
+ return PTR_ERR(phy->phy_ref_clk);
- kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(kirin_pcie->pcie_aux_clk))
- return PTR_ERR(kirin_pcie->pcie_aux_clk);
+ phy->aux_clk = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(phy->aux_clk))
+ return PTR_ERR(phy->aux_clk);
- kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
- if (IS_ERR(kirin_pcie->apb_phy_clk))
- return PTR_ERR(kirin_pcie->apb_phy_clk);
+ phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+ if (IS_ERR(phy->apb_phy_clk))
+ return PTR_ERR(phy->apb_phy_clk);
- kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
- if (IS_ERR(kirin_pcie->apb_sys_clk))
- return PTR_ERR(kirin_pcie->apb_sys_clk);
+ phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+ if (IS_ERR(phy->apb_sys_clk))
+ return PTR_ERR(phy->apb_sys_clk);
- kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
- if (IS_ERR(kirin_pcie->pcie_aclk))
- return PTR_ERR(kirin_pcie->pcie_aclk);
+ phy->aclk = devm_clk_get(dev, "pcie_aclk");
+ if (IS_ERR(phy->aclk))
+ return PTR_ERR(phy->aclk);
return 0;
}
-static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
- struct platform_device *pdev)
+static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy)
{
- struct device *dev = &pdev->dev;
- struct resource *apb;
- struct resource *phy;
- struct resource *dbi;
-
- apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
- kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
- if (IS_ERR(kirin_pcie->apb_base))
- return PTR_ERR(kirin_pcie->apb_base);
-
- phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
- if (IS_ERR(kirin_pcie->phy_base))
- return PTR_ERR(kirin_pcie->phy_base);
-
- dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
- if (IS_ERR(kirin_pcie->pci->dbi_base))
- return PTR_ERR(kirin_pcie->pci->dbi_base);
-
- kirin_pcie->crgctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
- if (IS_ERR(kirin_pcie->crgctrl))
- return PTR_ERR(kirin_pcie->crgctrl);
-
- kirin_pcie->sysctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
- if (IS_ERR(kirin_pcie->sysctrl))
- return PTR_ERR(kirin_pcie->sysctrl);
+ struct device *dev = phy->dev;
+ struct platform_device *pdev;
+
+ /* registers */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(phy->crgctrl))
+ return PTR_ERR(phy->crgctrl);
+
+ phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(phy->sysctrl))
+ return PTR_ERR(phy->sysctrl);
return 0;
}
-static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
{
- struct device *dev = kirin_pcie->pci->dev;
+ struct device *dev = phy->dev;
u32 reg_val;
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
reg_val &= ~PHY_REF_PAD_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0);
reg_val &= ~PHY_PWR_DOWN_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0);
usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
reg_val &= ~PHY_RST_ACK_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
if (reg_val & PIPE_CLK_STABLE) {
dev_err(dev, "PIPE clk is not stable\n");
return -EINVAL;
@@ -208,102 +226,274 @@ static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
return 0;
}
-static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy)
{
u32 val;
- regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+ regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
val |= PCIE_DEBOUNCE_PARAM;
val &= ~PCIE_OE_BYPASS;
- regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+ regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}
-static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+static int hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable)
{
int ret = 0;
if (!enable)
goto close_clk;
- ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+ ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
if (ret)
return ret;
- ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+ ret = clk_prepare_enable(phy->phy_ref_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+ ret = clk_prepare_enable(phy->apb_sys_clk);
if (ret)
goto apb_sys_fail;
- ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+ ret = clk_prepare_enable(phy->apb_phy_clk);
if (ret)
goto apb_phy_fail;
- ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+ ret = clk_prepare_enable(phy->aclk);
if (ret)
goto aclk_fail;
- ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+ ret = clk_prepare_enable(phy->aux_clk);
if (ret)
goto aux_clk_fail;
return 0;
close_clk:
- clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+ clk_disable_unprepare(phy->aux_clk);
aux_clk_fail:
- clk_disable_unprepare(kirin_pcie->pcie_aclk);
+ clk_disable_unprepare(phy->aclk);
aclk_fail:
- clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+ clk_disable_unprepare(phy->apb_phy_clk);
apb_phy_fail:
- clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+ clk_disable_unprepare(phy->apb_sys_clk);
apb_sys_fail:
- clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+ clk_disable_unprepare(phy->phy_ref_clk);
return ret;
}
-static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie)
{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
int ret;
/* Power supply for Host */
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
- kirin_pcie_oe_enable(kirin_pcie);
- ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+ hi3660_pcie_phy_oe_enable(phy);
+
+ ret = hi3660_pcie_phy_clk_ctrl(phy, true);
if (ret)
return ret;
/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
- regmap_write(kirin_pcie->crgctrl,
+ regmap_write(phy->crgctrl,
CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
- ret = kirin_pcie_phy_init(kirin_pcie);
+ ret = hi3660_pcie_phy_start(phy);
if (ret)
- goto close_clk;
+ goto disable_clks;
- /* perst assert Endpoint */
- if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
- usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
- if (ret)
- goto close_clk;
- usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+ return 0;
+disable_clks:
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+ return ret;
+}
+
+static int hi3660_pcie_phy_init(struct platform_device *pdev,
+ struct kirin_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_pcie_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ pcie->phy_priv = phy;
+ phy->dev = dev;
+
+ ret = hi3660_pcie_phy_get_clk(phy);
+ if (ret)
+ return ret;
+
+ return hi3660_pcie_phy_get_resource(phy);
+}
+
+static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+
+ /* Drop power supply for Host */
+ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00);
+
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+
+ return 0;
+}
+
+/*
+ * The non-PHY part starts here
+ */
+
+static const struct regmap_config pcie_kirin_regmap_conf = {
+ .name = "kirin_pcie_apb",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char name[32];
+ int ret, i;
+
+ /* This is an optional property */
+ ret = of_gpio_named_count(np, "hisilicon,clken-gpios");
+ if (ret < 0)
return 0;
+
+ if (ret > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many GPIO clock requests!\n");
+ return -EINVAL;
}
-close_clk:
- kirin_pcie_clk_ctrl(kirin_pcie, false);
+ pcie->n_gpio_clkreq = ret;
+
+ for (i = 0; i < pcie->n_gpio_clkreq; i++) {
+ pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
+ "hisilicon,clken-gpios", i);
+ if (pcie->gpio_id_clkreq[i] < 0)
+ return pcie->gpio_id_clkreq[i];
+
+ sprintf(name, "pcie_clkreq_%d", i);
+ pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->clkreq_names[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ int ret, slot, i;
+ char name[32];
+
+ for_each_available_child_of_node(node, parent) {
+ for_each_available_child_of_node(parent, child) {
+ i = pcie->num_slots;
+
+ pcie->gpio_id_reset[i] = of_get_named_gpio(child,
+ "reset-gpios", 0);
+ if (pcie->gpio_id_reset[i] < 0)
+ continue;
+
+ pcie->num_slots++;
+ if (pcie->num_slots > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many PCI slots!\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
+
+ ret = of_pci_get_devfn(child);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse devfn: %d\n", ret);
+ goto put_node;
+ }
+
+ slot = PCI_SLOT(ret);
+
+ sprintf(name, "pcie_perst_%d", slot);
+ pcie->reset_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->reset_names[i]) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+ }
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
+ of_node_put(parent);
+ return ret;
+}
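+
+/*
+ * On any failure inside the nested loops above, the child and parent
+ * node references taken by for_each_available_child_of_node() are still
+ * held, so they are dropped explicitly at the put_node label.
+ */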
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *node = dev->of_node;
+ void __iomem *apb_base;
+ int ret;
+
+ apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base))
+ return PTR_ERR(apb_base);
+
+ kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base,
+ &pcie_kirin_regmap_conf);
+ if (IS_ERR(kirin_pcie->apb))
+ return PTR_ERR(kirin_pcie->apb);
+
+ /* pcie internal PERST# gpio */
+ kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
+ "reset-gpios", 0);
+ if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
+ dev_err(dev, "unable to get a valid gpio pin\n");
+ return -ENODEV;
+ }
+
+ ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ /* Parse OF children */
+ for_each_available_child_of_node(node, child) {
+ ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
+ if (ret)
+ goto put_node;
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
return ret;
}
@@ -312,13 +502,13 @@ static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
{
u32 val;
- val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val);
if (on)
val = val | PCIE_ELBI_SLV_DBI_ENABLE;
else
val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
- kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val);
}
static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
@@ -326,43 +516,69 @@ static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
{
u32 val;
- val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val);
if (on)
val = val | PCIE_ELBI_SLV_DBI_ENABLE;
else
val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
- kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val);
}
-static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ int i, ret;
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+ if (!kirin_pcie->num_slots)
+ return 0;
- return ret;
+ /* Send PERST# to each slot */
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ if (ret) {
+ dev_err(pci->dev, "PERST# %s error: %d\n",
+ kirin_pcie->reset_names[i], ret);
+ }
+ }
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
}
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+ .add_bus = kirin_pcie_add_bus,
+};
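+
+/*
+ * As on other DWC hosts, only one device (slot 0) sits on the root bus,
+ * so the own-config accessors above reject any other devfn instead of
+ * letting the access alias through the DBI window.
+ */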
+
static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size)
{
@@ -389,49 +605,67 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
static int kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+ u32 val;
+ regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
return 1;
return 0;
}
-static int kirin_pcie_establish_link(struct pcie_port *pp)
+static int kirin_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- struct device *dev = kirin_pcie->pci->dev;
- int count = 0;
-
- if (kirin_pcie_link_up(pci))
- return 0;
-
- dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
- kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
- PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- while (!kirin_pcie_link_up(pci)) {
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- count++;
- if (count == 1000) {
- dev_err(dev, "Link Fail\n");
- return -EINVAL;
- }
- }
+ regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE,
+ PCIE_LTSSM_ENABLE_BIT);
return 0;
}
static int kirin_pcie_host_init(struct pcie_port *pp)
{
- kirin_pcie_establish_link(pp);
+ pp->bridge->ops = &kirin_pci_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ return 0;
+}
+
+static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
+ struct device *dev)
+{
+ int ret, i;
+
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->reset_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
+ kirin_pcie->reset_names[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->clkreq_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
+ kirin_pcie->clkreq_names[i]);
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -440,49 +674,106 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
.link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .rd_own_conf = kirin_pcie_rd_own_conf,
- .wr_own_conf = kirin_pcie_wr_own_conf,
.host_init = kirin_pcie_host_init,
};
-static int kirin_pcie_add_msi(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
{
- int irq;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "failed to get MSI IRQ (%d)\n", irq);
- return irq;
- }
+ int i;
- pci->pp.msi_irq = irq;
- }
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY)
+ return hi3660_pcie_phy_power_off(kirin_pcie);
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
+ gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+
+ phy_power_off(kirin_pcie->phy);
+ phy_exit(kirin_pcie->phy);
return 0;
}
-static int kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_pcie_power_on(struct platform_device *pdev,
+ struct kirin_pcie *kirin_pcie)
{
+ struct device *dev = &pdev->dev;
int ret;
- ret = kirin_pcie_add_msi(pci, pdev);
- if (ret)
- return ret;
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) {
+ ret = hi3660_pcie_phy_init(pdev, kirin_pcie);
+ if (ret)
+ return ret;
- pci->pp.ops = &kirin_pcie_host_ops;
+ ret = hi3660_pcie_phy_power_on(kirin_pcie);
+ if (ret)
+ return ret;
+ } else {
+ kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(kirin_pcie->phy))
+ return PTR_ERR(kirin_pcie->phy);
- return dw_pcie_host_init(&pci->pp);
+ ret = kirin_pcie_gpio_request(kirin_pcie, dev);
+ if (ret)
+ return ret;
+
+ ret = phy_init(kirin_pcie->phy);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(kirin_pcie->phy);
+ if (ret)
+ goto err;
+ }
+
+ /* perst assert Endpoint */
+ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+
+ if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+err:
+ kirin_pcie_power_off(kirin_pcie);
+
+ return ret;
+}
+
+static int __exit kirin_pcie_remove(struct platform_device *pdev)
+{
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&kirin_pcie->pci->pp);
+
+ kirin_pcie_power_off(kirin_pcie);
+
+ return 0;
}
+static const struct of_device_id kirin_pcie_match[] = {
+ {
+ .compatible = "hisilicon,kirin960-pcie",
+ .data = (void *)PCIE_KIRIN_INTERNAL_PHY
+ },
+ {
+ .compatible = "hisilicon,kirin970-pcie",
+ .data = (void *)PCIE_KIRIN_EXTERNAL_PHY
+ },
+ {},
+};
+
static int kirin_pcie_probe(struct platform_device *pdev)
{
+ enum pcie_kirin_phy_type phy_type;
+ const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
@@ -493,6 +784,14 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
+ of_id = of_match_device(kirin_pcie_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ phy_type = (long)of_id->data;
+
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
return -ENOMEM;
@@ -503,41 +802,35 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
-
- ret = kirin_pcie_get_clk(kirin_pcie, pdev);
- if (ret)
- return ret;
+ kirin_pcie->type = phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
return ret;
- kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_reset < 0)
- return -ENODEV;
+ platform_set_drvdata(pdev, kirin_pcie);
- ret = kirin_pcie_power_on(kirin_pcie);
+ ret = kirin_pcie_power_on(pdev, kirin_pcie);
if (ret)
return ret;
- platform_set_drvdata(pdev, kirin_pcie);
-
- return kirin_add_pcie_port(pci, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
-static const struct of_device_id kirin_pcie_match[] = {
- { .compatible = "hisilicon,kirin960-pcie" },
- {},
-};
-
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
+ .remove = __exit_p(kirin_pcie_remove),
.driver = {
.name = "kirin-pcie",
- .of_match_table = kirin_pcie_match,
- .suppress_bind_attrs = true,
+ .of_match_table = kirin_pcie_match,
+ .suppress_bind_attrs = true,
},
};
-builtin_platform_driver(kirin_pcie_driver);
+module_platform_driver(kirin_pcie_driver);
+
+MODULE_DEVICE_TABLE(of, kirin_pcie_match);
+MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs");
+MODULE_AUTHOR("Xiaowei Song <[email protected]>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 000000000000..7b17da2f9b3f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Author: Siddartha Mohanadoss <[email protected]>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_DB_CTRL 0x10
+#define PARF_PM_CTRL 0x20
+#define PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PARF_DEBUG_INT_EN 0x190
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_CFG_BITS 0x210
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SLV_ADDR_MSB_CTRL 0x2c0
+#define PARF_DBI_BASE_ADDR 0x350
+#define PARF_DBI_BASE_ADDR_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_SRIS_MODE 0x644
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_DOWN BIT(1)
+#define PARF_INT_ALL_BME BIT(2)
+#define PARF_INT_ALL_PM_TURNOFF BIT(3)
+#define PARF_INT_ALL_DEBUG BIT(4)
+#define PARF_INT_ALL_LTR BIT(5)
+#define PARF_INT_ALL_MHI_Q6 BIT(6)
+#define PARF_INT_ALL_MHI_A7 BIT(7)
+#define PARF_INT_ALL_DSTATE_CHANGE BIT(8)
+#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9)
+#define PARF_INT_ALL_MMIO_WRITE BIT(10)
+#define PARF_INT_ALL_CFG_WRITE BIT(11)
+#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12)
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_ALL_AER_LEGACY BIT(14)
+#define PARF_INT_ALL_PLS_ERR BIT(15)
+#define PARF_INT_ALL_PME_LEGACY BIT(16)
+#define PARF_INT_ALL_PLS_PME BIT(17)
+
+/* PARF_BDF_TO_SID_CFG register fields */
+#define PARF_BDF_TO_SID_BYPASS BIT(0)
+
+/* PARF_DEBUG_INT_EN register fields */
+#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1)
+#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
+#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define PARF_DEVICE_TYPE_EP 0x0
+
+/* PARF_PM_CTRL register fields */
+#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1)
+#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
+#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31)
+
+/* PARF_Q2A_FLUSH register fields */
+#define PARF_Q2A_FLUSH_EN BIT(16)
+
+/* PARF_SYS_CTRL register fields */
+#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
+#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
+
+/* PARF_DB_CTRL register fields */
+#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0)
+#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1)
+#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4)
+#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5)
+#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6)
+
+/* PARF_CFG_BITS register fields */
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+
+/* ELBI registers */
+#define ELBI_SYS_STTS 0x08
+
+/* DBI registers */
+#define DBI_CON_STATUS 0x44
+
+/* DBI register fields */
+#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0)
+
+#define XMLH_LINK_UP 0x400
+#define CORE_RESET_TIME_US_MIN 1000
+#define CORE_RESET_TIME_US_MAX 1005
+#define WAKE_DELAY_US 2000 /* 2 ms */
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+enum qcom_pcie_ep_link_status {
+ QCOM_PCIE_EP_LINK_DISABLED,
+ QCOM_PCIE_EP_LINK_ENABLED,
+ QCOM_PCIE_EP_LINK_UP,
+ QCOM_PCIE_EP_LINK_DOWN,
+};
+
+static struct clk_bulk_data qcom_pcie_ep_clks[] = {
+ { .id = "cfg" },
+ { .id = "aux" },
+ { .id = "bus_master" },
+ { .id = "bus_slave" },
+ { .id = "ref" },
+ { .id = "sleep" },
+ { .id = "slave_q2a" },
+};
+
+struct qcom_pcie_ep {
+ struct dw_pcie pci;
+
+ void __iomem *parf;
+ void __iomem *elbi;
+ struct regmap *perst_map;
+ struct resource *mmio_res;
+
+ struct reset_control *core_reset;
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct phy *phy;
+
+ u32 perst_en;
+ u32 perst_sep_en;
+
+ enum qcom_pcie_ep_link_status link_status;
+ int global_irq;
+ int perst_irq;
+};
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_assert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ ret = reset_control_deassert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot de-assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ return 0;
+}
+
+/*
+ * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
+ * device reset during host reboot and hibernation. The driver is
+ * expected to handle this situation.
+ */
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+}
+
+static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+
+ return reg & XMLH_LINK_UP;
+}
+
+static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ enable_irq(pcie_ep->perst_irq);
+
+ return 0;
+}
+
+static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ disable_irq(pcie_ep->perst_irq);
+}
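+
+/*
+ * On this endpoint, link bring-up is driven by the host releasing
+ * PERST#: start_link()/stop_link() merely gate the PERST# IRQ, and the
+ * real controller initialization runs from qcom_pcie_perst_deassert()
+ * once the interrupt fires.
+ */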
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+ gpiod_set_value_cansleep(pcie_ep->wake, 0);
+
+ qcom_pcie_ep_configure_tcsr(pcie_ep);
+
+ /* Disable BDF to SID mapping */
+ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+ val |= PARF_BDF_TO_SID_BYPASS;
+ writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+
+ /* Enable debug IRQ */
+ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
+ val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
+ PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
+ PARF_DEBUG_INT_PM_DSTATE_CHANGE;
+ writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);
+
+ /* Allow entering L1 state */
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+
+ /* Read halts write */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+
+ /* Write after write halt */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ /* Q2A flush disable */
+ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
+ val &= ~PARF_Q2A_FLUSH_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
+
+ /* Disable DBI Wakeup, core clock CGC and enable AUX power */
+ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
+ PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
+ PARF_SYS_CTRL_AUX_PWR_DET;
+ writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);
+
+ /* Disable the debouncers */
+ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
+ val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
+ PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
+ PARF_DB_CTRL_MST_WKP_BLOCK;
+ writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
+ val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L0SEL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+	/* Set the L1 Exit Latency to 32us-64us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
+ val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
+ PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
+ PARF_INT_ALL_LINK_UP;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+
+ ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ /*
+	 * The physical address of the MMIO region, which is exposed as the
+	 * BAR, must be written to the MHI BASE registers.
+ */
+ writel_relaxed(pcie_ep->mmio_res->start,
+ pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
+ writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+
+ dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
+ val |= BIT(8);
+ writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+
+ return 0;
+
+err_phy_power_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+ dev_dbg(dev, "Link is already disabled\n");
+ return;
+ }
+
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
+};
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_base2 = pci->dbi_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->elbi))
+ return PTR_ERR(pcie_ep->elbi);
+
+ pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mmio");
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
+ if (!syscon) {
+ dev_err(dev, "Failed to parse qcom,perst-regs\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->perst_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(pcie_ep->perst_map))
+ return PTR_ERR(pcie_ep->perst_map);
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 1, &pcie_ep->perst_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Enable offset in syscon\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 2, &pcie_ep->perst_sep_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Separation Enable offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
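
The qcom,perst-regs property is a bare triple, <&tcsr perst_en_offset perst_sep_en_offset>, so the code reads the phandle with of_parse_phandle() and the two offsets by cell index. A standalone model of indexing such a flattened cell array (the phandle and offset values below are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* flattened cells of: qcom,perst-regs = <&tcsr 0xb258 0xb270>; */
	uint32_t cells[] = { 0x2a /* phandle */, 0xb258, 0xb270 };

	printf("syscon phandle: 0x%x\n", cells[0]);
	printf("PERST enable offset: 0x%x\n", cells[1]);
	printf("PERST separation enable offset: 0x%x\n", cells[2]);
	return 0;
}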
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(pcie_ep->core_reset))
+ return PTR_ERR(pcie_ep->core_reset);
+
+ pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie_ep->reset))
+ return PTR_ERR(pcie_ep->reset);
+
+ pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie_ep->wake))
+ return PTR_ERR(pcie_ep->wake);
+
+ pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+ if (IS_ERR(pcie_ep->phy))
+ ret = PTR_ERR(pcie_ep->phy);
+
+ return ret;
+}
+
+/* TODO: Notify clients about PCIe state change */
+static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
+ u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
+ u32 dstate, val;
+
+ writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
+ status &= mask;
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
+ dev_dbg(dev, "Received Linkdown event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
+ dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
+ dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_READY_ENTR_L23;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
+ dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
+ DBI_CON_STATUS_POWER_STATE_MASK;
+ dev_dbg(dev, "Received D%d state event\n", dstate);
+ if (dstate == 3) {
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_REQ_EXIT_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ }
+ } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
+ } else {
+ dev_dbg(dev, "Received unknown event: %d\n", status);
+ }
+
+ return IRQ_HANDLED;
+}
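
Since every PARF_INT_ALL_* source is a single-bit mask, the FIELD_GET() calls above reduce to boolean tests, and the handler acknowledges the raw status (a write to the CLEAR register) before masking and dispatching one event class per interrupt. A tiny standalone decode of the same shape (the bit positions here are made up for illustration, not the hardware's):

#include <stdint.h>
#include <stdio.h>

#define INT_LINK_DOWN (1u << 1)    /* illustrative bit positions */
#define INT_BME       (1u << 2)
#define INT_LINK_UP   (1u << 13)

int main(void)
{
	uint32_t status = INT_BME | INT_LINK_UP; /* latched events */
	uint32_t mask = INT_LINK_DOWN | INT_BME | INT_LINK_UP;

	/* ack first (the driver writes status to PARF_INT_ALL_CLEAR),
	 * then consider only the enabled sources */
	status &= mask;
	if (status & INT_LINK_DOWN)
		puts("link down");
	else if (status & INT_BME)
		puts("bus master enable: link usable");
	else if (status & INT_LINK_UP)
		puts("link up: enumeration complete");
	return 0;
}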
+
+static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 perst;
+
+ perst = gpiod_get_value(pcie_ep->reset);
+ if (perst) {
+ dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
+ qcom_pcie_perst_assert(pci);
+ } else {
+ dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
+ qcom_pcie_perst_deassert(pci);
+ }
+
+ irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
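
PERST# is a level signal, so the handler above emulates edge triggering: after servicing one level it re-arms the IRQ for the opposite level via irq_set_irq_type(). A rough standalone model of that alternation (the helpers are hypothetical stand-ins for the GPIO/IRQ layer):

#include <stdbool.h>
#include <stdio.h>

static bool sample_perst(int event)   /* stand-in for gpiod_get_value() */
{
	return event % 2 == 0;        /* pretend the level alternates */
}

static void arm_trigger(bool high)    /* stand-in for irq_set_irq_type() */
{
	printf("  re-armed for %s level\n", high ? "high" : "low");
}

int main(void)
{
	for (int event = 0; event < 4; event++) {
		bool perst = sample_perst(event);

		if (perst)
			puts("PERST asserted: shut the link down");
		else
			puts("PERST deasserted: start link training");

		/* wait for the opposite level next time */
		arm_trigger(!perst);
	}
	return 0;
}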
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, "global");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get Global IRQ\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_ep_global_irq_thread,
+ IRQF_ONESHOT,
+ "global_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request Global IRQ\n");
+ return ret;
+ }
+
+ pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
+ irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
+ qcom_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
+ disable_irq(irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static struct dw_pcie_ep_ops pci_ep_ops = {
+ .ep_init = qcom_pcie_ep_init,
+ .raise_irq = qcom_pcie_ep_raise_irq,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep *pcie_ep;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->pci.dev = dev;
+ pcie_ep->pci.ops = &pci_ops;
+ pcie_ep->pci.ep.ops = &pci_ep_ops;
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ /* PHY needs to be powered on for dw_pcie_ep_init() */
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret)
+ goto err_phy_power_off;
+
+ return 0;
+
+err_phy_power_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static int qcom_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
+ return 0;
+
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sdx55-pcie-ep", },
+ { }
+};
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .remove = qcom_pcie_ep_remove,
+ .driver = {
+ .name = "qcom-pcie-ep",
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
+
+MODULE_AUTHOR("Siddartha Mohanadoss <[email protected]>");
+MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
+MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7e581748ee9f..1c3d1116bb60 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -27,6 +28,7 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define PCIE20_PARF_SYS_CTRL 0x00
@@ -39,13 +41,14 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
-#define PCIE20_COMMAND_STATUS 0x04
-#define CMD_BME_VAL 0x4
-#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
-#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
-
#define PCIE20_PARF_PHY_CTRL 0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
+
#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
@@ -54,6 +57,8 @@
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+#define PCIE20_PARF_DEVICE_TYPE 0x1000
+#define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -64,10 +69,6 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE20_CAP 0x70
-#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
-#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
-#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -76,20 +77,39 @@
#define DBI_RO_WR_EN 1
#define PERST_DELAY_US 1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH 0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING 0x38
+#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS 0x50
+#define PHY_RX0_EQ(x) ((x) << 24)
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
+#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0
+
+#define DEVICE_TYPE_RC 0x4
+
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
+
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
struct qcom_pcie_resources_2_1_0 {
- struct clk *iface_clk;
- struct clk *core_clk;
- struct clk *phy_clk;
+ struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
struct reset_control *pci_reset;
struct reset_control *axi_reset;
struct reset_control *ahb_reset;
struct reset_control *por_reset;
struct reset_control *phy_reset;
+ struct reset_control *ext_reset;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
@@ -139,12 +159,25 @@ struct qcom_pcie_resources_2_3_3 {
struct reset_control *rst[7];
};
+/* 6 clocks typically, 7 for sm8250 */
+struct qcom_pcie_resources_2_7_0 {
+ struct clk_bulk_data clks[7];
+ int num_clks;
+ struct regulator_bulk_data supplies[2];
+ struct reset_control *pci_reset;
+ struct clk *pipe_clk;
+ struct clk *pipe_clk_src;
+ struct clk *phy_pipe_clk;
+ struct clk *ref_clk_src;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_1_0_0 v1_0_0;
struct qcom_pcie_resources_2_1_0 v2_1_0;
struct qcom_pcie_resources_2_3_2 v2_3_2;
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
+ struct qcom_pcie_resources_2_7_0 v2_7_0;
};
struct qcom_pcie;
@@ -156,6 +189,12 @@ struct qcom_pcie_ops {
void (*deinit)(struct qcom_pcie *pcie);
void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie_cfg {
+ const struct qcom_pcie_ops *ops;
+ unsigned int pipe_clk_need_muxing:1;
};
struct qcom_pcie {
@@ -166,6 +205,7 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
const struct qcom_pcie_ops *ops;
+ unsigned int pipe_clk_need_muxing:1;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -184,18 +224,15 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static int qcom_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = pcie->pci;
-
- if (dw_pcie_link_up(pci))
- return 0;
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
/* Enable Link Training state machine */
if (pcie->ops->ltssm_enable)
pcie->ops->ltssm_enable(pcie);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
@@ -223,17 +260,21 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface_clk))
- return PTR_ERR(res->iface_clk);
+ res->clks[0].id = "iface";
+ res->clks[1].id = "core";
+ res->clks[2].id = "phy";
+ res->clks[3].id = "aux";
+ res->clks[4].id = "ref";
- res->core_clk = devm_clk_get(dev, "core");
- if (IS_ERR(res->core_clk))
- return PTR_ERR(res->core_clk);
+ /* iface, core, phy are required */
+ ret = devm_clk_bulk_get(dev, 3, res->clks);
+ if (ret < 0)
+ return ret;
- res->phy_clk = devm_clk_get(dev, "phy");
- if (IS_ERR(res->phy_clk))
- return PTR_ERR(res->phy_clk);
+ /* aux, ref are optional */
+ ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
+ if (ret < 0)
+ return ret;
res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
if (IS_ERR(res->pci_reset))
@@ -251,6 +292,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (IS_ERR(res->por_reset))
return PTR_ERR(res->por_reset);
+ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+ if (IS_ERR(res->ext_reset))
+ return PTR_ERR(res->ext_reset);
+
res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
return PTR_ERR_OR_ZERO(res->phy_reset);
}
@@ -259,14 +304,16 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
reset_control_assert(res->pci_reset);
reset_control_assert(res->axi_reset);
reset_control_assert(res->ahb_reset);
reset_control_assert(res->por_reset);
- reset_control_assert(res->pci_reset);
- clk_disable_unprepare(res->iface_clk);
- clk_disable_unprepare(res->core_clk);
- clk_disable_unprepare(res->phy_clk);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -275,83 +322,102 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
u32 val;
int ret;
+	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
return ret;
}
- ret = reset_control_assert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert ahb reset\n");
- goto err_assert_ahb;
- }
-
- ret = clk_prepare_enable(res->iface_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_assert_ahb;
- }
-
- ret = clk_prepare_enable(res->phy_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_phy;
- }
-
- ret = clk_prepare_enable(res->core_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_core;
- }
-
ret = reset_control_deassert(res->ahb_reset);
if (ret) {
dev_err(dev, "cannot deassert ahb reset\n");
goto err_deassert_ahb;
}
- /* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
- /* enable external reference clock */
- val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
- val |= BIT(16);
- writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ ret = reset_control_deassert(res->ext_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ext reset\n");
+ goto err_deassert_ext;
+ }
ret = reset_control_deassert(res->phy_reset);
if (ret) {
dev_err(dev, "cannot deassert phy reset\n");
- return ret;
+ goto err_deassert_phy;
}
ret = reset_control_deassert(res->pci_reset);
if (ret) {
dev_err(dev, "cannot deassert pci reset\n");
- return ret;
+ goto err_deassert_pci;
}
ret = reset_control_deassert(res->por_reset);
if (ret) {
dev_err(dev, "cannot deassert por reset\n");
- return ret;
+ goto err_deassert_por;
}
ret = reset_control_deassert(res->axi_reset);
if (ret) {
dev_err(dev, "cannot deassert axi reset\n");
- return ret;
+ goto err_deassert_axi;
}
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ goto err_clks;
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
+ of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PCIE20_PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+ }
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ }
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
/* wait for clock acquisition */
usleep_range(1000, 1500);
-
	/* Set the Max TLP size to 2K, instead of the default of 4K */
writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
@@ -360,13 +426,19 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
return 0;
+err_clks:
+ reset_control_assert(res->axi_reset);
+err_deassert_axi:
+ reset_control_assert(res->por_reset);
+err_deassert_por:
+ reset_control_assert(res->pci_reset);
+err_deassert_pci:
+ reset_control_assert(res->phy_reset);
+err_deassert_phy:
+ reset_control_assert(res->ext_reset);
+err_deassert_ext:
+ reset_control_assert(res->ahb_reset);
err_deassert_ahb:
- clk_disable_unprepare(res->core_clk);
-err_clk_core:
- clk_disable_unprepare(res->phy_clk);
-err_clk_phy:
- clk_disable_unprepare(res->iface_clk);
-err_assert_ahb:
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
return ret;
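
Note how the reworked error path mirrors the init sequence exactly: each label undoes only the steps that had already succeeded, in reverse order, so a failure at any point leaves the hardware as it was found. A minimal standalone illustration of the idiom:

#include <stdio.h>

static int do_step(const char *name, int fail)
{
	printf("%s\n", name);
	return fail ? -1 : 0;
}

static int bring_up(void)
{
	int ret;

	ret = do_step("deassert ahb reset", 0);
	if (ret)
		goto out;
	ret = do_step("deassert phy reset", 0);
	if (ret)
		goto err_ahb;
	ret = do_step("enable clocks", 1);    /* simulate a failure */
	if (ret)
		goto err_phy;
	return 0;

err_phy:
	do_step("assert phy reset", 0);       /* unwind in reverse */
err_ahb:
	do_step("assert ahb reset", 0);
out:
	return ret;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}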
@@ -963,6 +1035,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
u32 val;
@@ -1036,16 +1109,16 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
pcie->parf + PCIE20_PARF_SYS_CTRL);
writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
- writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
- val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
- val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
- writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
- writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
- PCIE20_DEVICE_CONTROL2_STATUS2);
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
return 0;
@@ -1068,13 +1141,241 @@ err_clk_iface:
return ret;
}
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "cfg";
+ res->clks[2].id = "bus_master";
+ res->clks[3].id = "bus_slave";
+ res->clks[4].id = "slave_q2a";
+ res->clks[5].id = "tbu";
+ if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) {
+ res->clks[6].id = "ddrss_sf_tbu";
+ res->num_clks = 7;
+ } else {
+ res->num_clks = 6;
+ }
+
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->pipe_clk_need_muxing) {
+ res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
+ if (IS_ERR(res->pipe_clk_src))
+ return PTR_ERR(res->pipe_clk_src);
+
+ res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe");
+ if (IS_ERR(res->phy_pipe_clk))
+ return PTR_ERR(res->phy_pipe_clk);
+
+ res->ref_clk_src = devm_clk_get(dev, "ref");
+ if (IS_ERR(res->ref_clk_src))
+ return PTR_ERR(res->ref_clk_src);
+ }
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /* Set TCXO as clock source for pcie_pipe_clk_src */
+ if (pcie->pipe_clk_need_muxing)
+ clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
+
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret < 0)
+ goto err_disable_regulators;
+
+ ret = reset_control_assert(res->pci_reset);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ goto err_disable_clocks;
+ }
+
+ usleep_range(1000, 1500);
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ goto err_disable_clocks;
+ }
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ goto err_disable_clocks;
+ }
+
+ /* configure PCIe to RC mode */
+ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_disable_clocks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ /* Set pipe clock as clock source for pcie_pipe_clk_src */
+ if (pcie->pipe_clk_need_muxing)
+ clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
+
+ return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
- u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
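
dw_pcie_find_capability() replaces the old hardcoded 0x70 offset: it discovers the PCIe capability by walking the standard linked list that begins at the Capabilities Pointer (config offset 0x34), where each entry holds an ID byte followed by a next pointer. A standalone sketch of that walk over a fake config space:

#include <stdint.h>
#include <stdio.h>

#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_PM       0x01
#define PCI_CAP_ID_EXP      0x10

static uint8_t cfg[256];              /* fake config space */

static uint8_t find_capability(uint8_t cap_id)
{
	uint8_t pos = cfg[PCI_CAPABILITY_LIST];
	int ttl = 48;                 /* guard against malformed lists */

	while (pos && ttl--) {
		if (cfg[pos] == cap_id)   /* byte 0: ID, byte 1: next */
			return pos;
		pos = cfg[pos + 1];
	}
	return 0;
}

int main(void)
{
	cfg[PCI_CAPABILITY_LIST] = 0x40;
	cfg[0x40] = PCI_CAP_ID_PM;  cfg[0x41] = 0x70;
	cfg[0x70] = PCI_CAP_ID_EXP; cfg[0x71] = 0x00;

	printf("PCIe capability at 0x%02x\n", find_capability(PCI_CAP_ID_EXP));
	return 0;
}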
+static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node,
+ "iommu-map", (u32 *)map, size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+	/* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ u16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+ 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+		/* If the register is already populated, look for the next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* If NEXT field is NULL then update it with next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
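
The table programmed above is effectively a hardware hash table: the CRC8 of the big-endian BDF selects a 32-bit slot holding BDF[31:16] | SID[15:8] | NEXT[7:0], and on a collision the occupied tail entry's NEXT byte is pointed at the next probed slot. A standalone model of the insert logic (a software array in place of the PARF registers; a bitwise MSB-first CRC8 with the driver's 0x07 polynomial stands in for the kernel's crc8()):

#include <stdint.h>
#include <stdio.h>

static uint32_t sid_table[256];       /* stands in for BDF_TO_SID_TABLE_N */

static uint8_t crc8_msb(const uint8_t *buf, int len)
{
	uint8_t crc = 0;

	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
	}
	return crc;
}

static void map_bdf_to_sid(uint16_t bdf, uint8_t sid)
{
	uint8_t be[2] = { bdf >> 8, bdf & 0xff };  /* big-endian BDF */
	uint8_t hash = crc8_msb(be, 2);
	uint32_t val = sid_table[hash];

	/* slot taken: chain through NEXT and probe linearly, as the
	 * driver does (hash is 8 bits wide, so probing wraps at 256) */
	while (val) {
		uint8_t cur = hash++;

		if (!(val & 0xff))
			sid_table[cur] = val | hash;
		val = sid_table[hash];
	}

	/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
	sid_table[hash] = (uint32_t)bdf << 16 | (uint32_t)sid << 8;
	printf("BDF %04x -> SID %u at slot 0x%02x\n", bdf, sid, hash);
}

int main(void)
{
	map_bdf_to_sid(0x0100, 0);        /* 01:00.0 */
	map_bdf_to_sid(0x0101, 1);        /* 01:00.1 */
	return 0;
}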
+
static int qcom_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1097,18 +1398,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
goto err_disable_phy;
}
- dw_pcie_setup_rc(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
qcom_ep_reset_deassert(pcie);
- ret = qcom_pcie_establish_link(pcie);
- if (ret)
- goto err;
+ if (pcie->ops->config_sid) {
+ ret = pcie->ops->config_sid(pcie);
+ if (ret)
+ goto err;
+ }
return 0;
+
err:
qcom_ep_reset_assert(pcie);
if (pcie->ops->post_deinit)
@@ -1167,17 +1466,72 @@ static const struct qcom_pcie_ops ops_2_3_3 = {
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .post_deinit = qcom_pcie_post_deinit_2_7_0,
+ .config_sid = qcom_pcie_config_sid_sm8250,
+};
+
+static const struct qcom_pcie_cfg apq8084_cfg = {
+ .ops = &ops_1_0_0,
+};
+
+static const struct qcom_pcie_cfg ipq8064_cfg = {
+ .ops = &ops_2_1_0,
+};
+
+static const struct qcom_pcie_cfg msm8996_cfg = {
+ .ops = &ops_2_3_2,
+};
+
+static const struct qcom_pcie_cfg ipq8074_cfg = {
+ .ops = &ops_2_3_3,
+};
+
+static const struct qcom_pcie_cfg ipq4019_cfg = {
+ .ops = &ops_2_4_0,
+};
+
+static const struct qcom_pcie_cfg sdm845_cfg = {
+ .ops = &ops_2_7_0,
+};
+
+static const struct qcom_pcie_cfg sm8250_cfg = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg sc7280_cfg = {
+ .ops = &ops_1_9_0,
+ .pipe_clk_need_muxing = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
+ const struct qcom_pcie_cfg *pcie_cfg;
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -1190,10 +1544,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_disable(dev);
- return ret;
- }
+ if (ret < 0)
+ goto err_pm_runtime_put;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
@@ -1201,7 +1553,14 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
- pcie->ops = of_device_get_match_data(dev);
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg || !pcie_cfg->ops) {
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ pcie->ops = pcie_cfg->ops;
+ pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing;
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
if (IS_ERR(pcie->reset)) {
@@ -1209,22 +1568,13 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
- pcie->parf = devm_ioremap_resource(dev, res);
+ pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
goto err_pm_runtime_put;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto err_pm_runtime_put;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie->elbi = devm_ioremap_resource(dev, res);
+ pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
if (IS_ERR(pcie->elbi)) {
ret = PTR_ERR(pcie->elbi);
goto err_pm_runtime_put;
@@ -1242,14 +1592,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- ret = pp->msi_irq;
- goto err_pm_runtime_put;
- }
- }
-
ret = phy_init(pcie->phy);
if (ret) {
pm_runtime_disable(&pdev->dev);
@@ -1275,13 +1617,18 @@ err_pm_runtime_put:
}
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
- { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
- { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
- { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
- { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
- { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
- { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
+ { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
+ { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
+ { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
+ { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
+ { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
+ { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg },
+ { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
{ }
};
@@ -1289,7 +1636,13 @@ static void qcom_fixup_class(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 7d0cdfd8138b..1a9e353bef55 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -26,7 +26,6 @@ struct spear13xx_pcie {
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- bool is_gen1;
};
struct pcie_app_reg {
@@ -65,60 +64,12 @@ struct pcie_app_reg {
/* CR6 */
#define MSI_CTRL_INT (1 << 26)
-#define EXP_CAP_ID_OFFSET 0x70
-
#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
-static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
- u32 val;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(pci->dev, "link already up\n");
- return 0;
- }
-
- dw_pcie_setup_rc(pp);
-
- /*
- * this controller support only 128 bytes read size, however its
- * default value in capability register is 512 bytes. So force
- * it to 128 here.
- */
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
-
- dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
-
- /*
- * if is_gen1 is set then handle it, so that some buggy card
- * also works
- */
- if (spear13xx_pcie->is_gen1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, val);
- }
- }
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -126,7 +77,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
@@ -151,16 +102,12 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- dw_pcie_msi_init(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
writel(readl(&app_reg->int_mask) |
MSI_CTRL_INT, &app_reg->int_mask);
- }
}
static int spear13xx_pcie_link_up(struct dw_pcie *pci)
@@ -178,8 +125,23 @@ static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+	 * This controller supports only a 128 byte read request size, but
+	 * the default value in the capability register is 512 bytes, so
+	 * force it to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
- spear13xx_pcie_establish_link(spear13xx_pcie);
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
return 0;
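
PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of Device Control and encodes Max_Read_Request_Size as 128 << value, so clearing the field selects the 128 byte size this controller requires. A quick standalone check of that encoding:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000   /* bits 14:12 */

static unsigned int readrq_bytes(uint16_t devctl)
{
	return 128u << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}

int main(void)
{
	uint16_t devctl = 0x2810;      /* pretend reset value: 512 bytes */

	printf("before: %u bytes\n", readrq_bytes(devctl));
	devctl &= ~PCI_EXP_DEVCTL_READRQ;     /* force 128 bytes */
	printf("after:  %u bytes\n", readrq_bytes(devctl));
	return 0;
}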
@@ -198,10 +160,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
int ret;
pp->irq = platform_get_irq(pdev, 0);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (pp->irq < 0)
return pp->irq;
- }
+
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"spear1340-pcie", spear13xx_pcie);
@@ -211,6 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -223,6 +185,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
};
static int spear13xx_pcie_probe(struct platform_device *pdev)
@@ -231,7 +194,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct spear13xx_pcie *spear13xx_pcie;
struct device_node *np = dev->of_node;
- struct resource *dbi_base;
int ret;
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
@@ -270,17 +232,8 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
return ret;
}
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base)) {
- dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
- ret = PTR_ERR(pci->dbi_base);
- goto fail_clk;
- }
- spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
-
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- spear13xx_pcie->is_gen1 = true;
+ pci->link_gen = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000000..c2de6ed4d86f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <[email protected]>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
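
tegra194_map_bus() routes accesses on the root bus straight to the DBI registers, picks CFG0 for the root port's immediate child and CFG1 for everything deeper, then retargets the single outbound iATU window at the encoded bus/device/function. A standalone sketch of that target encoding (the shift positions assume the usual DWC PCIE_ATU_BUS/DEV/FUNC layout; verify against pcie-designware.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed DWC iATU CFG target layout: bus[31:24] dev[23:19] func[18:16] */
#define ATU_BUS(b)  ((uint32_t)(b) << 24)
#define ATU_DEV(d)  ((uint32_t)(d) << 19)
#define ATU_FUNC(f) ((uint32_t)(f) << 16)

int main(void)
{
	unsigned int bus = 2, slot = 3, func = 1;   /* 02:03.1 */

	uint32_t busdev = ATU_BUS(bus) | ATU_DEV(slot) | ATU_FUNC(func);
	printf("iATU CFG target = 0x%08x\n", busdev); /* 0x02190000 */
	return 0;
}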
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cbe95f0ea0ca..904976913081 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -53,6 +54,7 @@
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
@@ -60,19 +62,26 @@
#define APPL_INTR_STATUS_L0 0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
#define APPL_INTR_EN_L1_0_0 0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
#define APPL_INTR_STATUS_L1_0_0 0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
#define APPL_INTR_STATUS_L1_1 0x2C
#define APPL_INTR_STATUS_L1_2 0x30
#define APPL_INTR_STATUS_L1_3 0x34
#define APPL_INTR_STATUS_L1_6 0x3C
#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
@@ -103,8 +112,12 @@
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+#define APPL_MSI_CTRL_1 0xAC
+
#define APPL_MSI_CTRL_2 0xB0
+#define APPL_LEGACY_INTX 0xB8
+
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT 16
@@ -170,19 +183,7 @@
#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
#define EVENT_COUNTER_GROUP_5 0x5
-#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C
-#define ENTER_ASPM BIT(30)
-#define L0S_ENTRANCE_LAT_SHIFT 24
-#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
-#define L1_ENTRANCE_LAT_SHIFT 27
-#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
-#define N_FTS_SHIFT 8
-#define N_FTS_MASK GENMASK(7, 0)
#define N_FTS_VAL 52
-
-#define PORT_LOGIC_GEN2_CTRL 0x80C
-#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17)
-#define FTS_MASK GENMASK(7, 0)
#define FTS_VAL 52
#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
@@ -205,6 +206,13 @@
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
#define PORT_LOGIC_MSIX_DOORBELL 0x948
#define CAP_SPCIE_CAP_OFF 0x154
@@ -223,6 +231,13 @@
#define GEN3_CORE_CLK_FREQ 250000000
#define GEN4_CORE_CLK_FREQ 500000000
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
static const unsigned int pcie_gen_freq[] = {
GEN1_CORE_CLK_FREQ,
GEN2_CORE_CLK_FREQ,
@@ -230,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
@@ -260,6 +257,8 @@ struct tegra_pcie_dw {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
+ enum dw_pcie_device_mode mode;
+
bool supports_clkreq;
bool enable_cdm_check;
bool link_state;
@@ -267,7 +266,6 @@ struct tegra_pcie_dw {
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
- u32 max_speed;
u32 cid;
u32 cfg_link_cap_l1sub;
u32 pcie_cap_base;
@@ -283,6 +281,16 @@ struct tegra_pcie_dw {
struct phy **phys;
struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+};
+
+struct tegra_pcie_dw_of_data {
+ enum dw_pcie_device_mode mode;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -339,8 +347,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
}
}
-static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -361,9 +370,9 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
}
@@ -411,50 +420,180 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
return IRQ_HANDLED;
}
-static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed;
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
- return tegra_pcie_rp_irq_handler(pcie);
+ return IRQ_HANDLED;
}
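
The LTR send above is the usual poll-until-clear-or-timeout idiom: kick the request, then re-read the REQ_STATE bit with a sleep per iteration until it clears or the LTR_MSG_TIMEOUT budget runs out, checking the bit once more after the deadline to decide whether to report failure. A compact standalone model (the busy flag is a stand-in for the register read):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool reg_busy(void)            /* stand-in for REQ_STATE */
{
	static int pending = 3;       /* clears after a few polls */
	return pending-- > 0;
}

int main(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!reg_busy()) {
			puts("LTR message sent");
			break;
		}
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec >= 1) { /* ~1s budget */
			puts("LTR message timed out");
			break;
		}
		/* the driver sleeps ~1 ms per iteration here */
	}
	return 0;
}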
-static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ int spurious = 1;
+ u32 status_l0, status_l1, link_status;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ dw_pcie_ep_linkup(ep);
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in root port mode, and the
	 * system hangs when it is accessed while the link is in the ASPM L1
	 * state. So skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
- return dw_pcie_read(pci->dbi_base + where, size, val);
+ return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in root port mode, and the
	 * system hangs when it is accessed while the link is in the ASPM L1
	 * state. So skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
- return dw_pcie_write(pci->dbi_base + where, size, val);
+ return pci_generic_config_write(bus, devfn, where, size, val);
}
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
#if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+ 0x1d8,
+ 0x1a8,
+ 0x1a8,
+ 0x1a8,
+ 0x1c4,
+ 0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+ 0x1dc,
+ 0x1ac,
+ 0x1ac,
+ 0x1ac,
+ 0x1c8,
+ 0x1dc
+};
+
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -542,30 +681,23 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
/* Program L0s and L1 entrance latencies */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~L0S_ENTRANCE_LAT_MASK;
- val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
- val |= ENTER_ASPM;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static int init_debugfs(struct tegra_pcie_dw *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- struct dentry *d;
-
- d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
- pcie->debugfs, aspm_state_cnt);
- if (IS_ERR_OR_NULL(d))
- dev_err(pcie->dev,
- "Failed to create debugfs file \"aspm_state_cnt\"\n");
-
- return 0;
+ debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
-static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
@@ -633,8 +765,6 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- dw_pcie_msi_init(pp);
-
/* Enable MSI interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
@@ -677,26 +807,24 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
/* Program init preset */
for (i = 0; i < pcie->num_lanes; i++) {
- dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, &val);
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
- dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, val);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
offset = dw_pcie_find_ext_capability(pci,
PCI_EXT_CAP_ID_PL_16GT) +
PCI_PL_16GT_LE_CTRL;
- dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
+ val = dw_pcie_readb_dbi(pci, offset + i);
val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
- dw_pcie_write(pci->dbi_base + offset + i, 1, val);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
}
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -725,12 +853,18 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static void tegra_pcie_prepare_host(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
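+
+	/* Route root port config accesses through the Tegra-specific pci_ops */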
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -742,17 +876,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Configure FTS */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~(N_FTS_MASK << N_FTS_SHIFT);
- val |= N_FTS_VAL << N_FTS_SHIFT;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
-
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val &= ~FTS_MASK;
- val |= FTS_VAL;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
-
 	/* Enable a 0xFFFF0001 response for CRS (Config Request Retry Status) */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
@@ -760,16 +883,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
AMBA_ERROR_RESPONSE_CRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
- /* Configure Max Speed from DT */
- if (pcie->max_speed && pcie->max_speed != -EINVAL) {
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_SLS;
- val |= pcie->max_speed;
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
- val);
- }
-
/* Configure Max lane width from DT */
val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_MLW;
@@ -780,6 +893,12 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
init_host_aspm(pcie);
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
@@ -790,10 +909,24 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
}
- dw_pcie_setup_rc(pp);
-
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ u32 val, offset, speed, tmp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct pcie_port *pp = &pci->pp;
+ bool retry = true;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
/* Assert RST */
val = appl_readl(pcie, APPL_PINMUX);
val &= ~APPL_PINMUX_PEX_RST;
@@ -812,17 +945,10 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
appl_writel(pcie, val, APPL_PINMUX);
msleep(100);
-}
-
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
- u32 val, tmp, offset, speed;
-
- tegra_pcie_prepare_host(pp);
if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
/*
* There are some endpoints which can't get the link up if
* root port has Data Link Feature (DLF) enabled.
@@ -856,10 +982,11 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_prepare_host(pp);
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
- if (dw_pcie_wait_for_link(pci))
- return 0;
+ retry = false;
+ goto retry_link;
}
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
@@ -879,20 +1006,21 @@ static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
- pp->num_vectors = MAX_MSI_IRQS;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
-static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .rd_own_conf = tegra_pcie_dw_rd_own_conf,
- .wr_own_conf = tegra_pcie_dw_wr_own_conf,
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
.host_init = tegra_pcie_dw_host_init,
- .set_num_vectors = tegra_pcie_set_msi_vec_num,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -934,9 +1062,16 @@ phy_exit:
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
int ret;
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
if (ret < 0) {
dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
@@ -961,8 +1096,6 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return ret;
}
- pcie->max_speed = of_pci_get_max_link_speed(np);
-
ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
if (ret) {
dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
@@ -986,6 +1119,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
+ if (pcie->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
return 0;
}
@@ -1017,6 +1184,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
@@ -1032,9 +1227,9 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
* 5.2 Link State Power Management (Page #428).
*/
- list_for_each_entry(child, &pp->root_bus->children, node) {
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
/* Bring downstream devices to D0 if they are not already in */
- if (child->parent == pp->root_bus) {
+ if (child->parent == pp->bridge->bus) {
root_bus = child;
break;
}
@@ -1203,15 +1398,6 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
reset_control_deassert(pcie->core_rst);
- pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
- PCI_CAP_ID_EXP);
-
- /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
return ret;
fail_phy:
@@ -1228,43 +1414,32 @@ fail_slot_reg_en:
return ret;
}
-static int __deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
ret = reset_control_assert(pcie->core_rst);
- if (ret) {
- dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
tegra_pcie_disable_phy(pcie);
ret = reset_control_assert(pcie->core_apb_rst);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
- return ret;
- }
clk_disable_unprepare(pcie->core_clk);
ret = regulator_disable(pcie->pex_ctl_supply);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
- return ret;
- }
tegra_pcie_disable_slot_regulators(pcie);
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
- return ret;
- }
-
- return ret;
}
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
@@ -1288,7 +1463,8 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
@@ -1317,6 +1493,16 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
return;
}
+	/*
+	 * The PCIe controller exits from L2 only if a reset is applied, so
+	 * the controller does not handle interrupts afterwards. But if L2
+	 * entry fails, PERST# is asserted, which can trigger a surprise
+	 * link-down AER. Since this function is called from suspend_noirq(),
+	 * that AER interrupt would never be processed. Disable all
+	 * interrupts to avoid such a scenario.
+	 */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
if (tegra_pcie_try_link_l2(pcie)) {
dev_info(pcie->dev, "Link didn't transition to L2 state\n");
/*
@@ -1329,6 +1515,14 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
data &= ~APPL_PINMUX_PEX_RST;
appl_writel(pcie, data, APPL_PINMUX);
+	/*
+	 * Some cards do not go to the Detect state even after de-asserting
+	 * PERST#. So, disable LTSSM to bring the link to the Detect state.
+	 */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
data,
((data &
@@ -1336,14 +1530,8 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
APPL_DEBUG_LTSSM_STATE_SHIFT) ==
LTSSM_STATE_PRE_DETECT,
1, LTSSM_TIMEOUT);
- if (err) {
+ if (err)
dev_info(pcie->dev, "Link didn't go to detect state\n");
- } else {
- /* Disable LTSSM after link is in detect state */
- data = appl_readl(pcie, APPL_CTRL);
- data &= ~APPL_CTRL_LTSSM_EN;
- appl_writel(pcie, data, APPL_CTRL);
- }
}
/*
* DBI registers may not be accessible after this as PLL-E would be
@@ -1357,30 +1545,20 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
tegra_pcie_dw_pme_turnoff(pcie);
-
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
struct device *dev = pcie->dev;
char *name;
int ret;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
- if (!pp->msi_irq) {
- dev_err(dev, "Failed to get MSI interrupt\n");
- return -ENODEV;
- }
- }
-
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
@@ -1393,10 +1571,14 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
ret = pinctrl_pm_select_default_state(dev);
if (ret < 0) {
dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
- goto fail_pinctrl;
+ goto fail_pm_get_sync;
}
- tegra_pcie_init_controller(pcie);
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
@@ -1411,28 +1593,376 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
}
pcie->debugfs = debugfs_create_dir(name, NULL);
- if (!pcie->debugfs)
- dev_err(dev, "Failed to create debugfs\n");
- else
- init_debugfs(pcie);
+ init_debugfs(pcie);
return ret;
fail_host_init:
tegra_pcie_deinit_controller(pcie);
-fail_pinctrl:
- pm_runtime_put_sync(dev);
fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+		dev_err(pcie->dev, "Failed to go to Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+	dev_dbg(pcie->dev, "Endpoint deinitialization completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
+ goto fail_pll_init;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+	/* Configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
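+	/* Tell the application logic where the DBI and iATU/DMA regions live */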
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
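+	/* Program the MSI-X address-match window to the endpoint's MSI memory */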
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+	dev_dbg(dev, "Endpoint initialization completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
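+	/* Generate an INTA pulse: assert, hold for 1-2 ms, then deassert */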
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
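+	/* A write to the MSI-X address-match window raises the vector in 'irq' */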
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
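+	/* Leave the PERST# IRQ disabled until the link is started via start_link() */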
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
- struct resource *dbi_res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct phy **phys;
@@ -1440,6 +1970,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
int ret;
u32 i;
+ data = of_device_get_match_data(dev);
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1447,21 +1979,44 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
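+	/* Fast Training Sequence counts, applied by the common DWC setup code */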
+ pci->n_fts[0] = N_FTS_VAL;
+ pci->n_fts[1] = FTS_VAL;
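+	/* DWC IP version implemented by Tegra194 (4.90a) */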
+ pci->version = 0x490A;
+
pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
pcie->dev = &pdev->dev;
+ pcie->mode = (enum dw_pcie_device_mode)data->mode;
ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
return ret;
}
ret = tegra_pcie_get_slot_regulators(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
return ret;
}
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
if (IS_ERR(pcie->pex_ctl_supply)) {
ret = PTR_ERR(pcie->pex_ctl_supply);
@@ -1518,20 +2073,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pcie->phys = phys;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!dbi_res) {
- dev_err(dev, "Failed to find \"dbi\" region\n");
- return -ENODEV;
- }
- pcie->dbi_res = dbi_res;
-
- pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /* Tegra HW locates DBI2 at a fixed offset from DBI */
- pci->dbi_base2 = pci->dbi_base + 0x1000;
-
atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"atu_dma");
if (!atu_dma_res) {
@@ -1540,6 +2081,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pcie->atu_dma_res = atu_dma_res;
+ pci->atu_size = resource_size(atu_dma_res);
pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
@@ -1552,17 +2094,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pp->irq = platform_get_irq_byname(pdev, "intr");
- if (!pp->irq) {
- dev_err(dev, "Failed to get \"intr\" interrupt\n");
- return -ENODEV;
- }
-
- ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
- IRQF_SHARED, "tegra-pcie-intr", pcie);
- if (ret) {
- dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
- return ret;
- }
+ if (pp->irq < 0)
+ return pp->irq;
pcie->bpmp = tegra_bpmp_get(dev);
if (IS_ERR(pcie->bpmp))
@@ -1570,11 +2103,43 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = tegra_pcie_config_rp(pcie);
- if (ret && ret != -ENOMEDIUM)
- goto fail;
- else
- return 0;
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ }
fail:
tegra_bpmp_put(pcie->bpmp);
@@ -1593,6 +2158,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
return 0;
}
@@ -1627,8 +2194,9 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
- return __deinit_controller(pcie);
+ return 0;
}
static int tegra_pcie_dw_resume_noirq(struct device *dev)
@@ -1649,6 +2217,12 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
goto fail_host_init;
}
+ dw_pcie_setup_rc(&pcie->pci.pp);
+
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
/* Restore MSI interrupt vector */
dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
pcie->msi_ctrl_int);
@@ -1656,7 +2230,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_dw_resume_early(struct device *dev)
@@ -1664,6 +2239,11 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+		dev_err(dev, "Suspend is not supported in EP mode\n");
+ return -ENOTSUPP;
+ }
+
if (!pcie->link_state)
return 0;
@@ -1694,12 +2274,25 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
disable_irq(pcie->pci.pp.msi_irq);
tegra_pcie_dw_pme_turnoff(pcie);
- __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
+ .data = &tegra_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra_pcie_dw_ep_of_data,
},
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 000000000000..69810c6b0d58
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <[email protected]>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct pci_epc_features *features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+	/*
+	 * This generates a pulse signal to send INTx to the RC, so it
+	 * should be cleared as soon as possible. This sequence is
+	 * protected by the mutex in pci_epc_raise_irq().
+	 */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return uniphier_pcie_ep_raise_legacy_irq(ep);
+ case PCI_EPC_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+	default:
+		dev_err(pci->dev, "Unknown IRQ type (%d)\n", type);
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return priv->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .ep_init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ uniphier_pcie_init_ep(priv);
+
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ return 0;
+
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->features = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->features))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ return dw_pcie_ep_init(&priv->pci.ep);
+}
+
+static const struct pci_epc_features uniphier_pro5_data = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 8fd7badd59c2..d05be942956e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -9,11 +9,11 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -146,16 +146,13 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
return (val & mask) == mask;
}
-static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- if (dw_pcie_link_up(pci))
- return 0;
-
uniphier_pcie_ltssm_enable(priv, true);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
@@ -171,36 +168,21 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}
-static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
-{
- writel(0, priv->base + PCL_RCV_INT);
- writel(0, priv->base + PCL_RCV_INTX);
-}
-
-static void uniphier_pcie_irq_ack(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- u32 val;
-
- val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_STATUS;
- val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
-}
-
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
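+
+	/* Serialize the PCL_RCV_INTX read-modify-write against the unmask path */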
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void uniphier_pcie_irq_unmask(struct irq_data *d)
@@ -208,17 +190,20 @@ static void uniphier_pcie_irq_unmask(struct irq_data *d)
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip uniphier_pcie_irq_chip = {
.name = "PCI",
- .irq_ack = uniphier_pcie_irq_ack,
.irq_mask = uniphier_pcie_irq_mask,
.irq_unmask = uniphier_pcie_irq_unmask,
};
@@ -244,7 +229,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
- u32 val, bit, virq;
+ u32 val, bit;
/* INT for debug */
val = readl(priv->base + PCL_RCV_INT);
@@ -266,10 +251,8 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
val = readl(priv->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(priv->legacy_irq_domain, bit);
chained_irq_exit(chip, desc);
}
@@ -323,14 +306,6 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)
uniphier_pcie_irq_enable(priv);
- dw_pcie_setup_rc(pp);
- ret = uniphier_pcie_establish_link(pci);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
return 0;
}
@@ -338,31 +313,6 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
-static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &priv->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &uniphier_pcie_host_ops;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "Failed to initialize host (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
int ret;
@@ -397,16 +347,8 @@ out_clk_disable:
return ret;
}
-static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
-{
- uniphier_pcie_irq_disable(priv);
- phy_exit(priv->phy);
- reset_control_assert(priv->rst);
- clk_disable_unprepare(priv->clk);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = uniphier_pcie_establish_link,
+ .start_link = uniphier_pcie_start_link,
.stop_link = uniphier_pcie_stop_link,
.link_up = uniphier_pcie_link_up,
};
@@ -415,7 +357,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -425,13 +366,7 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -453,34 +388,21 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- return uniphier_add_pcie_port(priv, pdev);
-}
-
-static int uniphier_pcie_remove(struct platform_device *pdev)
-{
- struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
+ priv->pci.pp.ops = &uniphier_pcie_host_ops;
- uniphier_pcie_host_disable(priv);
-
- return 0;
+ return dw_pcie_host_init(&priv->pci.pp);
}
static const struct of_device_id uniphier_pcie_match[] = {
{ .compatible = "socionext,uniphier-pcie", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
static struct platform_driver uniphier_pcie_driver = {
.probe = uniphier_pcie_probe,
- .remove = uniphier_pcie_remove,
.driver = {
.name = "uniphier-pcie",
.of_match_table = uniphier_pcie_match,
},
};
builtin_platform_driver(uniphier_pcie_driver);
-
-MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>");
-MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000000..50f80f07e4db
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
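+	/* Enable the LTSSM and wait for the link to train up to the L0 state */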
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * On this SoC, the CPU bus presents addresses to the PCIe bus offset by
+ * 0x40000000, so 0x40000000 is subtracted from the CPU bus address. This
+ * 0x40000000 offset also corresponds to io_base taken from the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct pcie_port *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
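+	/* Ungate the PCIe clocks in the SMU before releasing the reset lines */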
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
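+	/* Take direct control of PERST# and de-assert it after a settling delay */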
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
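+	/* Wait for PHY SRAM init to complete, then flag the external load as done */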
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);