Diffstat (limited to 'drivers/pci/controller/dwc')
23 files changed, 1359 insertions, 644 deletions
| diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 8afacc90c63b..4c38181acffa 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -311,16 +311,30 @@ config PCIE_RCAR_GEN4_EP  	  SoCs. To compile this driver as a module, choose M here: the module  	  will be called pcie-rcar-gen4.ko. This uses the DesignWare core. +config PCIE_ROCKCHIP_DW +	bool +  config PCIE_ROCKCHIP_DW_HOST -	bool "Rockchip DesignWare PCIe controller" -	select PCIE_DW -	select PCIE_DW_HOST +	bool "Rockchip DesignWare PCIe controller (host mode)"  	depends on PCI_MSI  	depends on ARCH_ROCKCHIP || COMPILE_TEST  	depends on OF +	select PCIE_DW_HOST +	select PCIE_ROCKCHIP_DW +	help +	  Enables support for the DesignWare PCIe controller in the +	  Rockchip SoC (except RK3399) to work in host mode. + +config PCIE_ROCKCHIP_DW_EP +	bool "Rockchip DesignWare PCIe controller (endpoint mode)" +	depends on ARCH_ROCKCHIP || COMPILE_TEST +	depends on OF +	depends on PCI_ENDPOINT +	select PCIE_DW_EP +	select PCIE_ROCKCHIP_DW  	help  	  Enables support for the DesignWare PCIe controller in the -	  Rockchip SoC except RK3399. +	  Rockchip SoC (except RK3399) to work in endpoint mode.  config PCI_EXYNOS  	tristate "Samsung Exynos PCIe controller" diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index bac103faa523..ec215b3d6191 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -16,7 +16,7 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o  obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o  obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o  obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o -obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o +obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o  obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o  obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o  obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index d2d17d37d3e0..4fe3b0cb72ec 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -13,11 +13,11 @@  #include <linux/err.h>  #include <linux/interrupt.h>  #include <linux/irq.h> +#include <linux/irqchip/chained_irq.h>  #include <linux/irqdomain.h>  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/of.h> -#include <linux/of_gpio.h>  #include <linux/of_pci.h>  #include <linux/pci.h>  #include <linux/phy/phy.h> @@ -113,9 +113,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,  	writel(value, pcie->base + offset);  } -static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)  { -	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; +	return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;  }  static int dra7xx_pcie_link_up(struct dw_pcie *pci) @@ -474,7 +474,7 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,  		return ret;  	} -	dw_pcie_ep_init_notify(ep); +	pci_epc_init_notify(ep->epc);  	return 0;  } diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index a33fa98a252e..fa45da28a218 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -54,43 +54,11 @@  struct exynos_pcie {  	struct dw_pcie			pci;  	void __iomem			*elbi_base; -	struct clk			*clk; -	struct clk			*bus_clk; +	struct clk_bulk_data		*clks;  	struct phy		
	*phy;  	struct regulator_bulk_data	supplies[2];  }; -static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep) -{ -	struct device *dev = ep->pci.dev; -	int ret; - -	ret = clk_prepare_enable(ep->clk); -	if (ret) { -		dev_err(dev, "cannot enable pcie rc clock"); -		return ret; -	} - -	ret = clk_prepare_enable(ep->bus_clk); -	if (ret) { -		dev_err(dev, "cannot enable pcie bus clock"); -		goto err_bus_clk; -	} - -	return 0; - -err_bus_clk: -	clk_disable_unprepare(ep->clk); - -	return ret; -} - -static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep) -{ -	clk_disable_unprepare(ep->bus_clk); -	clk_disable_unprepare(ep->clk); -} -  static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)  {  	writel(val, base + reg); @@ -332,17 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)  	if (IS_ERR(ep->elbi_base))  		return PTR_ERR(ep->elbi_base); -	ep->clk = devm_clk_get(dev, "pcie"); -	if (IS_ERR(ep->clk)) { -		dev_err(dev, "Failed to get pcie rc clock\n"); -		return PTR_ERR(ep->clk); -	} - -	ep->bus_clk = devm_clk_get(dev, "pcie_bus"); -	if (IS_ERR(ep->bus_clk)) { -		dev_err(dev, "Failed to get pcie bus clock\n"); -		return PTR_ERR(ep->bus_clk); -	} +	ret = devm_clk_bulk_get_all_enable(dev, &ep->clks); +	if (ret < 0) +		return ret;  	ep->supplies[0].supply = "vdd18";  	ep->supplies[1].supply = "vdd10"; @@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)  	if (ret)  		return ret; -	ret = exynos_pcie_init_clk_resources(ep); -	if (ret) -		return ret; -  	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);  	if (ret)  		return ret; @@ -369,7 +325,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)  fail_probe:  	phy_exit(ep->phy); -	exynos_pcie_deinit_clk_resources(ep);  	regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);  	return ret; @@ -383,7 +338,6 @@ static void exynos_pcie_remove(struct platform_device *pdev)  	exynos_pcie_assert_core_reset(ep);  	phy_power_off(ep->phy);  	phy_exit(ep->phy); -	exynos_pcie_deinit_clk_resources(ep);  	regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);  } @@ -437,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {  	},  };  module_platform_driver(exynos_pcie_driver); +MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");  MODULE_LICENSE("GPL v2");  MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 917c69edee1d..964d67756eb2 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -11,14 +11,13 @@  #include <linux/bitfield.h>  #include <linux/clk.h>  #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h>  #include <linux/kernel.h>  #include <linux/mfd/syscon.h>  #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>  #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>  #include <linux/module.h>  #include <linux/of.h> -#include <linux/of_gpio.h>  #include <linux/of_address.h>  #include <linux/pci.h>  #include <linux/platform_device.h> @@ -107,8 +106,7 @@ struct imx6_pcie_drvdata {  struct imx6_pcie {  	struct dw_pcie		*pci; -	int			reset_gpio; -	bool			gpio_active_high; +	struct gpio_desc	*reset_gpiod;  	bool			link_is_up;  	struct clk_bulk_data	clks[IMX6_PCIE_MAX_CLKS];  	struct regmap		*iomuxc_gpr; @@ -721,9 +719,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)  	}  	/* Some boards don't have PCIe reset GPIO. 
*/ -	if (gpio_is_valid(imx6_pcie->reset_gpio)) -		gpio_set_value_cansleep(imx6_pcie->reset_gpio, -					imx6_pcie->gpio_active_high); +	gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1);  }  static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) @@ -771,10 +767,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)  	}  	/* Some boards don't have PCIe reset GPIO. */ -	if (gpio_is_valid(imx6_pcie->reset_gpio)) { +	if (imx6_pcie->reset_gpiod) {  		msleep(100); -		gpio_set_value_cansleep(imx6_pcie->reset_gpio, -					!imx6_pcie->gpio_active_high); +		gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0);  		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */  		msleep(100);  	} @@ -1131,7 +1126,7 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,  		return ret;  	} -	dw_pcie_ep_init_notify(ep); +	pci_epc_init_notify(ep->epc);  	/* Start LTSSM. */  	imx6_pcie_ltssm_enable(dev); @@ -1285,22 +1280,11 @@ static int imx6_pcie_probe(struct platform_device *pdev)  		return PTR_ERR(pci->dbi_base);  	/* Fetch GPIOs */ -	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); -	imx6_pcie->gpio_active_high = of_property_read_bool(node, -						"reset-gpio-active-high"); -	if (gpio_is_valid(imx6_pcie->reset_gpio)) { -		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, -				imx6_pcie->gpio_active_high ? -					GPIOF_OUT_INIT_HIGH : -					GPIOF_OUT_INIT_LOW, -				"PCIe reset"); -		if (ret) { -			dev_err(dev, "unable to get reset gpio\n"); -			return ret; -		} -	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { -		return imx6_pcie->reset_gpio; -	} +	imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); +	if (IS_ERR(imx6_pcie->reset_gpiod)) +		return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod), +				     "unable to get reset gpio\n"); +	gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset");  	if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)  		return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index d3a7d14ee685..52c6420ae200 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -34,6 +34,11 @@  #define PCIE_DEVICEID_SHIFT	16  /* Application registers */ +#define PID				0x000 +#define RTL				GENMASK(15, 11) +#define RTL_SHIFT			11 +#define AM6_PCI_PG1_RTL_VER		0x15 +  #define CMD_STATUS			0x004  #define LTSSM_EN_VAL		        BIT(0)  #define OB_XLAT_EN_VAL		        BIT(1) @@ -104,6 +109,8 @@  #define to_keystone_pcie(x)		dev_get_drvdata((x)->dev) +#define PCI_DEVICE_ID_TI_AM654X		0xb00c +  struct ks_pcie_of_data {  	enum dw_pcie_device_mode mode;  	const struct dw_pcie_host_ops *host_ops; @@ -245,8 +252,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {  	.irq_unmask = ks_pcie_msi_unmask,  }; +/** + * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + *	     PCIe host controller driver information. + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. 
+ */ +static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) +{ +	u32 val; + +	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); +	val |= DBI_CS2; +	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + +	do { +		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); +	} while (!(val & DBI_CS2)); +} + +/** + * ks_pcie_clear_dbi_mode() - Disable DBI mode + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + *	     PCIe host controller driver information. + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) +{ +	u32 val; + +	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); +	val &= ~DBI_CS2; +	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + +	do { +		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); +	} while (val & DBI_CS2); +} +  static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)  { +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + +	/* Configure and set up BAR0 */ +	ks_pcie_set_dbi_mode(ks_pcie); + +	/* Enable BAR0 */ +	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); +	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); + +	ks_pcie_clear_dbi_mode(ks_pcie); + +	/* +	 * For BAR0, just setting bus address for inbound writes (MSI) should +	 * be sufficient.  Use physical address to avoid any conflicts. +	 */ +	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); +  	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;  	return dw_pcie_allocate_domains(pp);  } @@ -340,59 +407,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {  	.xlate = irq_domain_xlate_onetwocell,  }; -/** - * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers - * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone - *	     PCIe host controller driver information. - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. - */ -static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) -{ -	u32 val; - -	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); -	val |= DBI_CS2; -	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); - -	do { -		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); -	} while (!(val & DBI_CS2)); -} - -/** - * ks_pcie_clear_dbi_mode() - Disable DBI mode - * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone - *	     PCIe host controller driver information. - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. 
- */ -static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) -{ -	u32 val; - -	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); -	val &= ~DBI_CS2; -	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); - -	do { -		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); -	} while (val & DBI_CS2); -} - -static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  {  	u32 val;  	u32 num_viewport = ks_pcie->num_viewport;  	struct dw_pcie *pci = ks_pcie->pci;  	struct dw_pcie_rp *pp = &pci->pp; -	u64 start, end; +	struct resource_entry *entry;  	struct resource *mem; +	u64 start, end;  	int i; -	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res; +	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); +	if (!entry) +		return -ENODEV; + +	mem = entry->res;  	start = mem->start;  	end = mem->end; @@ -403,7 +433,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  	ks_pcie_clear_dbi_mode(ks_pcie);  	if (ks_pcie->is_am6) -		return; +		return 0;  	val = ilog2(OB_WIN_SIZE);  	ks_pcie_app_writel(ks_pcie, OB_SIZE, val); @@ -420,6 +450,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);  	val |= OB_XLAT_EN_VAL;  	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + +	return 0;  }  static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, @@ -445,44 +477,10 @@ static struct pci_ops ks_child_pcie_ops = {  	.write = pci_generic_config_write,  }; -/** - * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization - * @bus: A pointer to the PCI bus structure. - * - * This sets BAR0 to enable inbound access for MSI_IRQ register - */ -static int ks_pcie_v3_65_add_bus(struct pci_bus *bus) -{ -	struct dw_pcie_rp *pp = bus->sysdata; -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - -	if (!pci_is_root_bus(bus)) -		return 0; - -	/* Configure and set up BAR0 */ -	ks_pcie_set_dbi_mode(ks_pcie); - -	/* Enable BAR0 */ -	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); -	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); - -	ks_pcie_clear_dbi_mode(ks_pcie); - -	 /* -	  * For BAR0, just setting bus address for inbound writes (MSI) should -	  * be sufficient.  Use physical address to avoid any conflicts. 
-	  */ -	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); - -	return 0; -} -  static struct pci_ops ks_pcie_ops = {  	.map_bus = dw_pcie_own_conf_map_bus,  	.read = pci_generic_config_read,  	.write = pci_generic_config_write, -	.add_bus = ks_pcie_v3_65_add_bus,  };  /** @@ -525,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)  static void ks_pcie_quirk(struct pci_dev *dev)  {  	struct pci_bus *bus = dev->bus; +	struct keystone_pcie *ks_pcie; +	struct device *bridge_dev;  	struct pci_dev *bridge; +	u32 val; +  	static const struct pci_device_id rc_pci_devids[] = {  		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),  		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, @@ -537,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)  		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },  		{ 0, },  	}; +	static const struct pci_device_id am6_pci_devids[] = { +		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X), +		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, +		{ 0, }, +	};  	if (pci_is_root_bus(bus))  		bridge = dev; @@ -558,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)  	 */  	if (pci_match_id(rc_pci_devids, bridge)) {  		if (pcie_get_readrq(dev) > 256) { -			dev_info(&dev->dev, "limiting MRRS to 256\n"); +			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");  			pcie_set_readrq(dev, 256);  		}  	} + +	/* +	 * Memory transactions fail with PCI controller in AM654 PG1.0 +	 * when MRRS is set to more than 128 bytes. Force the MRRS to +	 * 128 bytes in all downstream devices. +	 */ +	if (pci_match_id(am6_pci_devids, bridge)) { +		bridge_dev = pci_get_host_bridge_device(dev); +		if (!bridge_dev && !bridge_dev->parent) +			return; + +		ks_pcie = dev_get_drvdata(bridge_dev->parent); +		if (!ks_pcie) +			return; + +		val = ks_pcie_app_readl(ks_pcie, PID); +		val &= RTL; +		val >>= RTL_SHIFT; +		if (val != AM6_PCI_PG1_RTL_VER) +			return; + +		if (pcie_get_readrq(dev) > 128) { +			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n"); +			pcie_set_readrq(dev, 128); +		} +	}  }  DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); @@ -814,7 +847,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)  		return ret;  	ks_pcie_stop_link(pci); -	ks_pcie_setup_rc_app_regs(ks_pcie); +	ret = ks_pcie_setup_rc_app_regs(ks_pcie); +	if (ret) +		return ret; +  	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),  			pci->dbi_base + PCI_IO_BASE); @@ -1293,7 +1329,7 @@ static int ks_pcie_probe(struct platform_device *pdev)  			goto err_ep_init;  		} -		dw_pcie_ep_init_notify(&pci->ep); +		pci_epc_init_notify(pci->ep.epc);  		break;  	default: diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 7dde6d5fa4d8..a4a800699f89 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)  		dev_dbg(pci->dev, "Link up\n");  	} else if (val & PEX_PF0_PME_MES_DR_LDD) {  		dev_dbg(pci->dev, "Link down\n"); -		pci_epc_linkdown(pci->ep.epc); +		dw_pcie_ep_linkdown(&pci->ep);  	} else if (val & PEX_PF0_PME_MES_DR_HRD) {  		dev_dbg(pci->dev, "Hot reset\n");  	} @@ -286,7 +286,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)  		return ret;  	} -	dw_pcie_ep_init_notify(&pci->ep); +	pci_epc_init_notify(pci->ep.epc);  	return ls_pcie_ep_interrupt_init(pcie, pdev);  } diff --git 
a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 6477c83262c2..db9482a113e9 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -9,7 +9,6 @@  #include <linux/clk.h>  #include <linux/delay.h>  #include <linux/gpio/consumer.h> -#include <linux/of_gpio.h>  #include <linux/pci.h>  #include <linux/platform_device.h>  #include <linux/reset.h> diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index 6dfdda59f328..643115f74092 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {  	.write = pci_generic_config_write,  }; -static void al_pcie_config_prepare(struct al_pcie *pcie) +static int al_pcie_config_prepare(struct al_pcie *pcie)  {  	struct al_pcie_target_bus_cfg *target_bus_cfg;  	struct dw_pcie_rp *pp = &pcie->pci->pp;  	unsigned int ecam_bus_mask; +	struct resource_entry *ft;  	u32 cfg_control_offset; +	struct resource *bus;  	u8 subordinate_bus;  	u8 secondary_bus;  	u32 cfg_control;  	u32 reg; -	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res; +	ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS); +	if (!ft) +		return -ENODEV; + +	bus = ft->res;  	target_bus_cfg = &pcie->target_bus_cfg;  	ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1; @@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)  	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);  	al_pcie_controller_writel(pcie, cfg_control_offset, reg); + +	return 0;  }  static int al_pcie_host_init(struct dw_pcie_rp *pp) @@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)  	if (rc)  		return rc; -	al_pcie_config_prepare(pcie); +	rc = al_pcie_config_prepare(pcie); +	if (rc) +		return rc;  	return 0;  } diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index a4630b92489b..f8e7283dacd4 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u  	regmap_write(artpec6_pcie->regmap, offset, val);  } -static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)  {  	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);  	struct dw_pcie_rp *pp = &pci->pp; @@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)  	switch (artpec6_pcie->mode) {  	case DW_PCIE_RC_TYPE: -		return pci_addr - pp->cfg0_base; +		return cpu_addr - pp->cfg0_base;  	case DW_PCIE_EP_TYPE: -		return pci_addr - ep->phys_base; +		return cpu_addr - ep->phys_base;  	default:  		dev_err(pci->dev, "UNKNOWN device type\n");  	} -	return pci_addr; +	return cpu_addr;  }  static int artpec6_pcie_establish_link(struct dw_pcie *pci) @@ -452,7 +452,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)  			return ret;  		} -		dw_pcie_ep_init_notify(&pci->ep); +		pci_epc_init_notify(pci->ep.epc);  		break;  	default: diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 47391d7d3a73..43ba5c6738df 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -16,30 +16,6 @@  #include <linux/pci-epf.h>  
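
Note: the al_pcie_config_prepare() change above (and the matching change in pci-keystone.c earlier in this diff) replaces a blind dereference of resource_list_first_type() with an explicit NULL check, since a host bridge is not guaranteed to carry a window of the requested type. A minimal sketch of that pattern follows; the helper name and error handling are illustrative only, and the usual pcie-designware.h / resource_ext.h context is assumed.

/*
 * Illustrative only: mirrors the NULL-check pattern added to pcie-al.c and
 * pci-keystone.c. resource_list_first_type() returns NULL when the bridge
 * has no window of the requested type, so callers must not dereference the
 * result unconditionally.
 */
static int example_first_mem_window(struct dw_pcie_rp *pp,
				    struct resource **res)
{
	struct resource_entry *entry;

	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return -ENODEV;

	*res = entry->res;
	return 0;
}
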
/** - * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event - * @ep: DWC EP device - */ -void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) -{ -	struct pci_epc *epc = ep->epc; - -	pci_epc_linkup(epc); -} -EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup); - -/** - * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete - * @ep: DWC EP device - */ -void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) -{ -	struct pci_epc *epc = ep->epc; - -	pci_epc_init_notify(epc); -} -EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify); - -/**   * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to   *				 the endpoint function   * @ep: DWC EP device @@ -161,7 +137,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,  	if (!ep->bar_to_atu[bar])  		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);  	else -		free_win = ep->bar_to_atu[bar]; +		free_win = ep->bar_to_atu[bar] - 1;  	if (free_win >= pci->num_ib_windows) {  		dev_err(pci->dev, "No free inbound window\n"); @@ -175,15 +151,18 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,  		return ret;  	} -	ep->bar_to_atu[bar] = free_win; +	/* +	 * Always increment free_win before assignment, since value 0 is used to identify +	 * unallocated mapping. +	 */ +	ep->bar_to_atu[bar] = free_win + 1;  	set_bit(free_win, ep->ib_window_map);  	return 0;  } -static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no, -				   phys_addr_t phys_addr, -				   u64 pci_addr, size_t size) +static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, +				   struct dw_pcie_ob_atu_cfg *atu)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	u32 free_win; @@ -195,13 +174,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,  		return -EINVAL;  	} -	ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM, -					   phys_addr, pci_addr, size); +	atu->index = free_win; +	ret = dw_pcie_prog_outbound_atu(pci, atu);  	if (ret)  		return ret;  	set_bit(free_win, ep->ob_window_map); -	ep->outbound_addr[free_win] = phys_addr; +	ep->outbound_addr[free_win] = atu->cpu_addr;  	return 0;  } @@ -212,7 +191,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	enum pci_barno bar = epf_bar->barno; -	u32 atu_index = ep->bar_to_atu[bar]; +	u32 atu_index = ep->bar_to_atu[bar] - 1; + +	if (!ep->bar_to_atu[bar]) +		return;  	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags); @@ -233,6 +215,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	int ret, type;  	u32 reg; +	/* +	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs +	 * 1 and 2 to form a 64-bit BAR. 
+	 */ +	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1)) +		return -EINVAL; +  	reg = PCI_BASE_ADDRESS_0 + (4 * bar);  	if (!(flags & PCI_BASE_ADDRESS_SPACE)) @@ -301,8 +290,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	int ret;  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - -	ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size); +	struct dw_pcie_ob_atu_cfg atu = { 0 }; + +	atu.func_no = func_no; +	atu.type = PCIE_ATU_TYPE_MEM; +	atu.cpu_addr = addr; +	atu.pci_addr = pci_addr; +	atu.size = size; +	ret = dw_pcie_ep_outbound_atu(ep, &atu);  	if (ret) {  		dev_err(pci->dev, "Failed to enable address\n");  		return ret; @@ -632,7 +627,6 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	dw_pcie_edma_remove(pci); -	ep->epc->init_complete = false;  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); @@ -674,6 +668,34 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)  	return 0;  } +static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) +{ +	unsigned int offset; +	unsigned int nbars; +	u32 reg, i; + +	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + +	dw_pcie_dbi_ro_wr_en(pci); + +	if (offset) { +		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); +		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> +			PCI_REBAR_CTRL_NBAR_SHIFT; + +		/* +		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one +		 * size in the range from 1 MB to 512 GB. Advertise support +		 * for 1 MB BAR size only. +		 */ +		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) +			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); +	} + +	dw_pcie_setup(pci); +	dw_pcie_dbi_ro_wr_dis(pci); +} +  /**   * dw_pcie_ep_init_registers - Initialize DWC EP specific registers   * @ep: DWC EP device @@ -688,13 +710,11 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  	struct dw_pcie_ep_func *ep_func;  	struct device *dev = pci->dev;  	struct pci_epc *epc = ep->epc; -	unsigned int offset, ptm_cap_base; -	unsigned int nbars; +	u32 ptm_cap_base, reg;  	u8 hdr_type;  	u8 func_no; -	int i, ret;  	void *addr; -	u32 reg; +	int ret;  	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &  		   PCI_HEADER_TYPE_MASK; @@ -757,25 +777,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  	if (ep->ops->init)  		ep->ops->init(ep); -	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);  	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); -	dw_pcie_dbi_ro_wr_en(pci); - -	if (offset) { -		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); -		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> -			PCI_REBAR_CTRL_NBAR_SHIFT; - -		/* -		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one -		 * size in the range from 1 MB to 512 GB. Advertise support -		 * for 1 MB BAR size only. -		 */ -		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) -			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); -	} -  	/*  	 * PTM responder capability can be disabled only after disabling  	 * PTM root capability. 
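
Note: the endpoint-side conversions above follow the same convention as the host-side changes further down: instead of passing func_no, index, type and addresses as separate arguments, callers now describe an outbound window with struct dw_pcie_ob_atu_cfg and hand it to dw_pcie_prog_outbound_atu(). A hedged sketch of that calling convention (the wrapper name is made up for illustration):

/*
 * Sketch of the new dw_pcie_prog_outbound_atu() calling convention.
 * Fields left zero-initialized (func_no, code, routing) keep their
 * defaults; code and routing only matter for PCIE_ATU_TYPE_MSG windows.
 */
static int example_prog_mem_window(struct dw_pcie *pci, int index,
				   u64 cpu_addr, u64 pci_addr, u64 size)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.index = index;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.cpu_addr = cpu_addr;
	atu.pci_addr = pci_addr;
	atu.size = size;

	return dw_pcie_prog_outbound_atu(pci, &atu);
}
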
@@ -792,8 +795,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  		dw_pcie_dbi_ro_wr_dis(pci);  	} -	dw_pcie_setup(pci); -	dw_pcie_dbi_ro_wr_dis(pci); +	dw_pcie_ep_init_non_sticky_registers(pci);  	return 0; @@ -805,6 +807,43 @@ err_remove_edma:  EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);  /** + * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event + * @ep: DWC EP device + */ +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ +	struct pci_epc *epc = ep->epc; + +	pci_epc_linkup(epc); +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup); + +/** + * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event + * @ep: DWC EP device + * + * Non-sticky registers are also initialized before sending the notification to + * the EPF drivers. This is needed since the registers need to be initialized + * before the link comes back again. + */ +void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct pci_epc *epc = ep->epc; + +	/* +	 * Initialize the non-sticky DWC registers as they would've reset post +	 * Link Down. This is specifically needed for drivers not supporting +	 * PERST# as they have no way to reinitialize the registers before the +	 * link comes back again. +	 */ +	dw_pcie_ep_init_non_sticky_registers(pci); + +	pci_epc_linkdown(epc); +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown); + +/**   * dw_pcie_ep_init - Initialize the endpoint device   * @ep: DWC EP device   * diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index d15a5c2d5b48..a0822d5371bc 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -398,6 +398,32 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)  	return 0;  } +static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct resource_entry *win; +	struct resource *res; + +	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); +	if (win) { +		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL); +		if (!res) +			return; + +		/* +		 * Allocate MSG TLP region of size 'region_align' at the end of +		 * the host bridge window. +		 */ +		res->start = win->res->end - pci->region_align + 1; +		res->end = win->res->end; +		res->name = "msg"; +		res->flags = win->res->flags | IORESOURCE_BUSY; + +		if (!devm_request_resource(pci->dev, win->res, res)) +			pp->msg_res = res; +	} +} +  int dw_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -484,6 +510,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)  	dw_pcie_iatu_detect(pci); +	/* +	 * Allocate the resource for MSG TLP before programming the iATU +	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends +	 * on the value of 'region_align', this has to be done after +	 * dw_pcie_iatu_detect(). +	 * +	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to +	 * make use of the generic MSG TLP implementation. 
+	 */ +	if (pp->use_atu_msg) +		dw_pcie_host_request_msg_tlp_res(pp); +  	ret = dw_pcie_edma_detect(pci);  	if (ret)  		goto err_free_msi; @@ -554,6 +592,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  {  	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct dw_pcie_ob_atu_cfg atu = { 0 };  	int type, ret;  	u32 busdev; @@ -576,8 +615,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  	else  		type = PCIE_ATU_TYPE_CFG1; -	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, -					pp->cfg0_size); +	atu.type = type; +	atu.cpu_addr = pp->cfg0_base; +	atu.pci_addr = busdev; +	atu.size = pp->cfg0_size; + +	ret = dw_pcie_prog_outbound_atu(pci, &atu);  	if (ret)  		return NULL; @@ -589,6 +632,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,  {  	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct dw_pcie_ob_atu_cfg atu = { 0 };  	int ret;  	ret = pci_generic_config_read(bus, devfn, where, size, val); @@ -596,9 +640,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,  		return ret;  	if (pp->cfg0_io_shared) { -		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, -						pp->io_base, pp->io_bus_addr, -						pp->io_size); +		atu.type = PCIE_ATU_TYPE_IO; +		atu.cpu_addr = pp->io_base; +		atu.pci_addr = pp->io_bus_addr; +		atu.size = pp->io_size; + +		ret = dw_pcie_prog_outbound_atu(pci, &atu);  		if (ret)  			return PCIBIOS_SET_FAILED;  	} @@ -611,6 +658,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,  {  	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct dw_pcie_ob_atu_cfg atu = { 0 };  	int ret;  	ret = pci_generic_config_write(bus, devfn, where, size, val); @@ -618,9 +666,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,  		return ret;  	if (pp->cfg0_io_shared) { -		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, -						pp->io_base, pp->io_bus_addr, -						pp->io_size); +		atu.type = PCIE_ATU_TYPE_IO; +		atu.cpu_addr = pp->io_base; +		atu.pci_addr = pp->io_bus_addr; +		atu.size = pp->io_size; + +		ret = dw_pcie_prog_outbound_atu(pci, &atu);  		if (ret)  			return PCIBIOS_SET_FAILED;  	} @@ -655,6 +706,7 @@ static struct pci_ops dw_pcie_ops = {  static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct dw_pcie_ob_atu_cfg atu = { 0 };  	struct resource_entry *entry;  	int i, ret; @@ -682,10 +734,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  		if (pci->num_ob_windows <= ++i)  			break; -		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM, -						entry->res->start, -						entry->res->start - entry->offset, -						resource_size(entry->res)); +		atu.index = i; +		atu.type = PCIE_ATU_TYPE_MEM; +		atu.cpu_addr = entry->res->start; +		atu.pci_addr = entry->res->start - entry->offset; + +		/* Adjust iATU size if MSG TLP region was allocated before */ +		if (pp->msg_res && pp->msg_res->parent == entry->res) +			atu.size = resource_size(entry->res) - +					resource_size(pp->msg_res); +		else +			atu.size = resource_size(entry->res); + +		ret = dw_pcie_prog_outbound_atu(pci, &atu);  		if (ret) {  			dev_err(pci->dev, "Failed to set MEM range %pr\n",  				entry->res); @@ -695,10 +756,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  	if (pp->io_size) {  		if (pci->num_ob_windows > ++i) { -			ret = 
dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO, -							pp->io_base, -							pp->io_bus_addr, -							pp->io_size); +			atu.index = i; +			atu.type = PCIE_ATU_TYPE_IO; +			atu.cpu_addr = pp->io_base; +			atu.pci_addr = pp->io_bus_addr; +			atu.size = pp->io_size; + +			ret = dw_pcie_prog_outbound_atu(pci, &atu);  			if (ret) {  				dev_err(pci->dev, "Failed to set IO range %pr\n",  					entry->res); @@ -713,6 +777,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",  			 pci->num_ob_windows); +	pp->msg_atu_index = i; +  	i = 0;  	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {  		if (resource_type(entry->res) != IORESOURCE_MEM) @@ -818,11 +884,47 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)  }  EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); +static int dw_pcie_pme_turn_off(struct dw_pcie *pci) +{ +	struct dw_pcie_ob_atu_cfg atu = { 0 }; +	void __iomem *mem; +	int ret; + +	if (pci->num_ob_windows <= pci->pp.msg_atu_index) +		return -ENOSPC; + +	if (!pci->pp.msg_res) +		return -ENOSPC; + +	atu.code = PCIE_MSG_CODE_PME_TURN_OFF; +	atu.routing = PCIE_MSG_TYPE_R_BC; +	atu.type = PCIE_ATU_TYPE_MSG; +	atu.size = resource_size(pci->pp.msg_res); +	atu.index = pci->pp.msg_atu_index; + +	atu.cpu_addr = pci->pp.msg_res->start; + +	ret = dw_pcie_prog_outbound_atu(pci, &atu); +	if (ret) +		return ret; + +	mem = ioremap(atu.cpu_addr, pci->region_align); +	if (!mem) +		return -ENOMEM; + +	/* A dummy write is converted to a Msg TLP */ +	writel(0, mem); + +	iounmap(mem); + +	return 0; +} +  int dw_pcie_suspend_noirq(struct dw_pcie *pci)  {  	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	u32 val; -	int ret; +	int ret = 0;  	/*  	 * If L1SS is supported, then do not put the link into L2 as some @@ -834,10 +936,13 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)  	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)  		return 0; -	if (!pci->pp.ops->pme_turn_off) -		return 0; +	if (pci->pp.ops->pme_turn_off) +		pci->pp.ops->pme_turn_off(&pci->pp); +	else +		ret = dw_pcie_pme_turn_off(pci); -	pci->pp.ops->pme_turn_off(&pci->pp); +	if (ret) +		return ret;  	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,  				PCIE_PME_TO_L2_TIMEOUT_US/10, diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 8490c5d6ff9f..771b9d9be077 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -154,7 +154,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)  			dw_pcie_ep_deinit(&pci->ep);  		} -		dw_pcie_ep_init_notify(&pci->ep); +		pci_epc_init_notify(pci->ep.epc);  		break;  	default: diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 250cf7f40b85..1b5aba1f0c92 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -465,56 +465,61 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)  	return val | PCIE_ATU_TD;  } -static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, -				       int index, int type, u64 cpu_addr, -				       u64 pci_addr, u64 size) +int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, +			      const struct dw_pcie_ob_atu_cfg *atu)  { +	u64 cpu_addr = atu->cpu_addr;  	u32 retries, val;  	u64 limit_addr;  	if (pci->ops && pci->ops->cpu_addr_fixup)  		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); -	limit_addr = 
cpu_addr + size - 1; +	limit_addr = cpu_addr + atu->size - 1;  	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||  	    !IS_ALIGNED(cpu_addr, pci->region_align) || -	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) { +	    !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {  		return -EINVAL;  	} -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE, +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,  			      lower_32_bits(cpu_addr)); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE, +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,  			      upper_32_bits(cpu_addr)); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT, +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,  			      lower_32_bits(limit_addr));  	if (dw_pcie_ver_is_ge(pci, 460A)) -		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT, +		dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,  				      upper_32_bits(limit_addr)); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET, -			      lower_32_bits(pci_addr)); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET, -			      upper_32_bits(pci_addr)); +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET, +			      lower_32_bits(atu->pci_addr)); +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET, +			      upper_32_bits(atu->pci_addr)); -	val = type | PCIE_ATU_FUNC_NUM(func_no); +	val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);  	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&  	    dw_pcie_ver_is_ge(pci, 460A))  		val |= PCIE_ATU_INCREASE_REGION_SIZE;  	if (dw_pcie_ver_is(pci, 490A))  		val = dw_pcie_enable_ecrc(val); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val); +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val); -	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); +	val = PCIE_ATU_ENABLE; +	if (atu->type == PCIE_ATU_TYPE_MSG) { +		/* The data-less messages only for now */ +		val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code; +	} +	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);  	/*  	 * Make sure ATU enable takes effect before any subsequent config  	 * and I/O accesses.  	 
*/  	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { -		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2); +		val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);  		if (val & PCIE_ATU_ENABLE)  			return 0; @@ -526,21 +531,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,  	return -ETIMEDOUT;  } -int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, -			      u64 cpu_addr, u64 pci_addr, u64 size) -{ -	return __dw_pcie_prog_outbound_atu(pci, 0, index, type, -					   cpu_addr, pci_addr, size); -} - -int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				 int type, u64 cpu_addr, u64 pci_addr, -				 u64 size) -{ -	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type, -					   cpu_addr, pci_addr, size); -} -  static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)  {  	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg); @@ -655,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)  		if (dw_pcie_link_up(pci))  			break; -		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); +		msleep(LINK_WAIT_SLEEP_MS);  	}  	if (retries >= LINK_WAIT_MAX_RETRIES) { @@ -880,30 +870,40 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {  	.irq_vector = dw_pcie_edma_irq_vector,  }; -static int dw_pcie_edma_find_chip(struct dw_pcie *pci) +static void dw_pcie_edma_init_data(struct dw_pcie *pci) +{ +	pci->edma.dev = pci->dev; + +	if (!pci->edma.ops) +		pci->edma.ops = &dw_pcie_edma_ops; + +	pci->edma.flags |= DW_EDMA_CHIP_LOCAL; +} + +static int dw_pcie_edma_find_mf(struct dw_pcie *pci)  {  	u32 val;  	/* +	 * Bail out finding the mapping format if it is already set by the glue +	 * driver. Also ensure that the edma.reg_base is pointing to a valid +	 * memory region. +	 */ +	if (pci->edma.mf != EDMA_MF_EDMA_LEGACY) +		return pci->edma.reg_base ? 0 : -ENODEV; + +	/*  	 * Indirect eDMA CSRs access has been completely removed since v5.40a  	 * thus no space is now reserved for the eDMA channels viewport and  	 * former DMA CTRL register is no longer fixed to FFs. -	 * -	 * Note that Renesas R-Car S4-8's PCIe controllers for unknown reason -	 * have zeros in the eDMA CTRL register even though the HW-manual -	 * explicitly states there must FFs if the unrolled mapping is enabled. -	 * For such cases the low-level drivers are supposed to manually -	 * activate the unrolled mapping to bypass the auto-detection procedure.  	 */ -	if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL)) +	if (dw_pcie_ver_is_ge(pci, 540A))  		val = 0xFFFFFFFF;  	else  		val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);  	if (val == 0xFFFFFFFF && pci->edma.reg_base) {  		pci->edma.mf = EDMA_MF_EDMA_UNROLL; - -		val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);  	} else if (val != 0xFFFFFFFF) {  		pci->edma.mf = EDMA_MF_EDMA_LEGACY; @@ -912,15 +912,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)  		return -ENODEV;  	} -	pci->edma.dev = pci->dev; +	return 0; +} -	if (!pci->edma.ops) -		pci->edma.ops = &dw_pcie_edma_ops; +static int dw_pcie_edma_find_channels(struct dw_pcie *pci) +{ +	u32 val; -	pci->edma.flags |= DW_EDMA_CHIP_LOCAL; +	/* +	 * Autodetect the read/write channels count only for non-HDMA platforms. +	 * HDMA platforms with native CSR mapping doesn't support autodetect, +	 * so the glue drivers should've passed the valid count already. If not, +	 * the below sanity check will catch it. 
+	 */ +	if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) { +		val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL); -	pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val); -	pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val); +		pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val); +		pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val); +	}  	/* Sanity check the channels count if the mapping was incorrect */  	if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH || @@ -930,6 +940,19 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)  	return 0;  } +static int dw_pcie_edma_find_chip(struct dw_pcie *pci) +{ +	int ret; + +	dw_pcie_edma_init_data(pci); + +	ret = dw_pcie_edma_find_mf(pci); +	if (ret) +		return ret; + +	return dw_pcie_edma_find_channels(pci); +} +  static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)  {  	struct platform_device *pdev = to_platform_device(pci->dev); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index f8e5431a207b..53c4c8f399c8 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -51,9 +51,8 @@  /* DWC PCIe controller capabilities */  #define DW_PCIE_CAP_REQ_RES		0 -#define DW_PCIE_CAP_EDMA_UNROLL		1 -#define DW_PCIE_CAP_IATU_UNROLL		2 -#define DW_PCIE_CAP_CDM_CHECK		3 +#define DW_PCIE_CAP_IATU_UNROLL		1 +#define DW_PCIE_CAP_CDM_CHECK		2  #define dw_pcie_cap_is(_pci, _cap) \  	test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) @@ -63,14 +62,16 @@  /* Parameters for the waiting for link up routine */  #define LINK_WAIT_MAX_RETRIES		10 -#define LINK_WAIT_USLEEP_MIN		90000 -#define LINK_WAIT_USLEEP_MAX		100000 +#define LINK_WAIT_SLEEP_MS		90  /* Parameters for the waiting for iATU enabled routine */  #define LINK_WAIT_MAX_IATU_RETRIES	5  #define LINK_WAIT_IATU			9  /* Synopsys-specific PCIe configuration registers */ +#define PCIE_PORT_FORCE			0x708 +#define PORT_FORCE_DO_DESKEW_FOR_SRIS	BIT(23) +  #define PCIE_PORT_AFR			0x70C  #define PORT_AFR_N_FTS_MASK		GENMASK(15, 8)  #define PORT_AFR_N_FTS(n)		FIELD_PREP(PORT_AFR_N_FTS_MASK, n) @@ -92,6 +93,9 @@  #define PORT_LINK_MODE_4_LANES		PORT_LINK_MODE(0x7)  #define PORT_LINK_MODE_8_LANES		PORT_LINK_MODE(0xf) +#define PCIE_PORT_LANE_SKEW		0x714 +#define PORT_LANE_SKEW_INSERT_MASK	GENMASK(23, 0) +  #define PCIE_PORT_DEBUG0		0x728  #define PORT_LOGIC_LTSSM_STATE_MASK	0x1f  #define PORT_LOGIC_LTSSM_STATE_L0	0x11 @@ -148,11 +152,13 @@  #define PCIE_ATU_TYPE_IO		0x2  #define PCIE_ATU_TYPE_CFG0		0x4  #define PCIE_ATU_TYPE_CFG1		0x5 +#define PCIE_ATU_TYPE_MSG		0x10  #define PCIE_ATU_TD			BIT(8)  #define PCIE_ATU_FUNC_NUM(pf)           ((pf) << 20)  #define PCIE_ATU_REGION_CTRL2		0x004  #define PCIE_ATU_ENABLE			BIT(31)  #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30) +#define PCIE_ATU_INHIBIT_PAYLOAD	BIT(22)  #define PCIE_ATU_FUNC_NUM_MATCH_EN      BIT(19)  #define PCIE_ATU_LOWER_BASE		0x008  #define PCIE_ATU_UPPER_BASE		0x00C @@ -299,6 +305,17 @@ enum dw_pcie_ltssm {  	DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,  }; +struct dw_pcie_ob_atu_cfg { +	int index; +	int type; +	u8 func_no; +	u8 code; +	u8 routing; +	u64 cpu_addr; +	u64 pci_addr; +	u64 size; +}; +  struct dw_pcie_host_ops {  	int (*init)(struct dw_pcie_rp *pp);  	void (*deinit)(struct dw_pcie_rp *pp); @@ -328,6 +345,9 @@ struct dw_pcie_rp {  	struct pci_host_bridge  *bridge;  	raw_spinlock_t		lock;  	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); +	bool			use_atu_msg; +	int			msg_atu_index; +	struct resource		*msg_res;  };  struct 
dw_pcie_ep_ops { @@ -433,10 +453,8 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);  int dw_pcie_link_up(struct dw_pcie *pci);  void dw_pcie_upconfig_setup(struct dw_pcie *pci);  int dw_pcie_wait_for_link(struct dw_pcie *pci); -int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, -			      u64 cpu_addr, u64 pci_addr, u64 size); -int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				 int type, u64 cpu_addr, u64 pci_addr, u64 size); +int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, +			      const struct dw_pcie_ob_atu_cfg *atu);  int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,  			     u64 cpu_addr, u64 pci_addr, u64 size);  int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, @@ -668,9 +686,9 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,  #ifdef CONFIG_PCIE_DW_EP  void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); +void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);  int dw_pcie_ep_init(struct dw_pcie_ep *ep);  int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep); -void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);  void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);  void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);  int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no); @@ -688,18 +706,18 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)  {  } -static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) +static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)  { -	return 0;  } -static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) +static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)  {  	return 0;  } -static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) +static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  { +	return 0;  }  static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep) diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index d6842141d384..1170e1107508 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -34,10 +34,16 @@  #define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)  #define PCIE_CLIENT_RC_MODE		HIWORD_UPDATE_BIT(0x40) +#define PCIE_CLIENT_EP_MODE		HIWORD_UPDATE(0xf0, 0x0)  #define PCIE_CLIENT_ENABLE_LTSSM	HIWORD_UPDATE_BIT(0xc) +#define PCIE_CLIENT_DISABLE_LTSSM	HIWORD_UPDATE(0x0c, 0x8) +#define PCIE_CLIENT_INTR_STATUS_MISC	0x10 +#define PCIE_CLIENT_INTR_MASK_MISC	0x24  #define PCIE_SMLH_LINKUP		BIT(16)  #define PCIE_RDLH_LINKUP		BIT(17)  #define PCIE_LINKUP			(PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP) +#define PCIE_RDLH_LINK_UP_CHGED		BIT(1) +#define PCIE_LINK_REQ_RST_NOT_INT	BIT(2)  #define PCIE_L0S_ENTRY			0x11  #define PCIE_CLIENT_GENERAL_CONTROL	0x0  #define PCIE_CLIENT_INTR_STATUS_LEGACY	0x8 @@ -49,25 +55,30 @@  #define PCIE_LTSSM_STATUS_MASK		GENMASK(5, 0)  struct rockchip_pcie { -	struct dw_pcie			pci; -	void __iomem			*apb_base; -	struct phy			*phy; -	struct clk_bulk_data		*clks; -	unsigned int			clk_cnt; -	struct reset_control		*rst; -	struct gpio_desc		*rst_gpio; -	struct regulator                *vpcie3v3; -	struct irq_domain		*irq_domain; +	struct dw_pcie pci; +	void __iomem *apb_base; +	struct phy *phy; +	struct clk_bulk_data *clks; +	unsigned int clk_cnt; +	struct reset_control *rst; +	struct gpio_desc *rst_gpio; +	struct regulator *vpcie3v3; +	struct irq_domain *irq_domain; +	const struct rockchip_pcie_of_data *data;  }; 
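
Note: the new PCIE_CLIENT_EP_MODE and PCIE_CLIENT_DISABLE_LTSSM values above are built with the driver's HIWORD_UPDATE helpers, which follow the usual Rockchip "write-enable mask in the high half-word" register scheme. Their definitions are not part of this hunk; the conventional form, shown here as an assumption, is:

/*
 * Assumed helper definitions (not visible in this hunk). Rockchip APB
 * registers take a 16-bit write-enable mask in bits [31:16] and the new
 * value in bits [15:0], so HIWORD_UPDATE(0x0c, 0x8) touches only bits 2-3
 * of the register, and HIWORD_UPDATE_BIT(x) sets exactly the bits in x.
 */
#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)
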
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, -					     u32 reg) +struct rockchip_pcie_of_data { +	enum dw_pcie_device_mode mode; +	const struct pci_epc_features *epc_features; +}; + +static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)  {  	return readl_relaxed(rockchip->apb_base + reg);  } -static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, -						u32 val, u32 reg) +static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val, +				     u32 reg)  {  	writel_relaxed(val, rockchip->apb_base + reg);  } @@ -144,16 +155,27 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)  	return 0;  } +static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip) +{ +	return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS); +} +  static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)  {  	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,  				 PCIE_CLIENT_GENERAL_CONTROL);  } +static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip) +{ +	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM, +				 PCIE_CLIENT_GENERAL_CONTROL); +} +  static int rockchip_pcie_link_up(struct dw_pcie *pci)  {  	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); -	u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS); +	u32 val = rockchip_pcie_get_ltssm(rockchip);  	if ((val & PCIE_LINKUP) == PCIE_LINKUP &&  	    (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY) @@ -186,12 +208,18 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)  	return 0;  } +static void rockchip_pcie_stop_link(struct dw_pcie *pci) +{ +	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); + +	rockchip_pcie_disable_ltssm(rockchip); +} +  static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);  	struct device *dev = rockchip->pci.dev; -	u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);  	int irq, ret;  	irq = of_irq_get_byname(dev->of_node, "legacy"); @@ -205,12 +233,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)  	irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,  					 rockchip); -	/* LTSSM enable control mode */ -	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); - -	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE, -				 PCIE_CLIENT_GENERAL_CONTROL); -  	return 0;  } @@ -218,6 +240,82 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {  	.init = rockchip_pcie_host_init,  }; +static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	enum pci_barno bar; + +	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) +		dw_pcie_ep_reset_bar(pci, bar); +}; + +static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, +				   unsigned int type, u16 interrupt_num) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + +	switch (type) { +	case PCI_IRQ_INTX: +		return dw_pcie_ep_raise_intx_irq(ep, func_no); +	case PCI_IRQ_MSI: +		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); +	case PCI_IRQ_MSIX: +		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); +	default: +		dev_err(pci->dev, "UNKNOWN IRQ type\n"); +	} + +	return 0; +} + +static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = { +	.linkup_notifier = true, +	.msi_capable = true, +	.msix_capable = true, +	.align = SZ_64K, +	.bar[BAR_0] = { 
.type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +}; + +/* + * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of + * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver, + * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by + * default.) If the host could write to BAR4, the iATU settings (for all other + * BARs) would be overwritten, resulting in (all other BARs) no longer working. + */ +static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = { +	.linkup_notifier = true, +	.msi_capable = true, +	.msix_capable = true, +	.align = SZ_64K, +	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_4] = { .type = BAR_RESERVED, }, +	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +}; + +static const struct pci_epc_features * +rockchip_pcie_get_features(struct dw_pcie_ep *ep) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); + +	return rockchip->data->epc_features; +} + +static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = { +	.init = rockchip_pcie_ep_init, +	.raise_irq = rockchip_pcie_raise_irq, +	.get_features = rockchip_pcie_get_features, +}; +  static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)  {  	struct device *dev = rockchip->pci.dev; @@ -225,11 +323,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)  	ret = devm_clk_bulk_get_all(dev, &rockchip->clks);  	if (ret < 0) -		return ret; +		return dev_err_probe(dev, ret, "failed to get clocks\n");  	rockchip->clk_cnt = ret; -	return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks); +	ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks); +	if (ret) +		return dev_err_probe(dev, ret, "failed to enable clocks\n"); + +	return 0;  }  static int rockchip_pcie_resource_get(struct platform_device *pdev, @@ -237,12 +339,14 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,  {  	rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");  	if (IS_ERR(rockchip->apb_base)) -		return PTR_ERR(rockchip->apb_base); +		return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base), +				     "failed to map apb registers\n");  	rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", -						     GPIOD_OUT_HIGH); +						     GPIOD_OUT_LOW);  	if (IS_ERR(rockchip->rst_gpio)) -		return PTR_ERR(rockchip->rst_gpio); +		return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio), +				     "failed to get reset gpio\n");  	rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);  	if (IS_ERR(rockchip->rst)) @@ -282,15 +386,127 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)  static const struct dw_pcie_ops dw_pcie_ops = {  	.link_up = rockchip_pcie_link_up,  	.start_link = rockchip_pcie_start_link, +	.stop_link = rockchip_pcie_stop_link,  }; +static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) +{ +	struct rockchip_pcie *rockchip = arg; +	struct dw_pcie *pci = 
&rockchip->pci; +	struct device *dev = pci->dev; +	u32 reg, val; + +	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC); +	rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC); + +	dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg); +	dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip)); + +	if (reg & PCIE_LINK_REQ_RST_NOT_INT) { +		dev_dbg(dev, "hot reset or link-down reset\n"); +		dw_pcie_ep_linkdown(&pci->ep); +	} + +	if (reg & PCIE_RDLH_LINK_UP_CHGED) { +		val = rockchip_pcie_get_ltssm(rockchip); +		if ((val & PCIE_LINKUP) == PCIE_LINKUP) { +			dev_dbg(dev, "link up\n"); +			dw_pcie_ep_linkup(&pci->ep); +		} +	} + +	return IRQ_HANDLED; +} + +static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip) +{ +	struct dw_pcie_rp *pp; +	u32 val; + +	if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST)) +		return -ENODEV; + +	/* LTSSM enable control mode */ +	val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); +	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); + +	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE, +				 PCIE_CLIENT_GENERAL_CONTROL); + +	pp = &rockchip->pci.pp; +	pp->ops = &rockchip_pcie_host_ops; + +	return dw_pcie_host_init(pp); +} + +static int rockchip_pcie_configure_ep(struct platform_device *pdev, +				      struct rockchip_pcie *rockchip) +{ +	struct device *dev = &pdev->dev; +	int irq, ret; +	u32 val; + +	if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP)) +		return -ENODEV; + +	irq = platform_get_irq_byname(pdev, "sys"); +	if (irq < 0) { +		dev_err(dev, "missing sys IRQ resource\n"); +		return irq; +	} + +	ret = devm_request_threaded_irq(dev, irq, NULL, +					rockchip_pcie_ep_sys_irq_thread, +					IRQF_ONESHOT, "pcie-sys", rockchip); +	if (ret) { +		dev_err(dev, "failed to request PCIe sys IRQ\n"); +		return ret; +	} + +	/* LTSSM enable control mode */ +	val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); +	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); + +	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE, +				 PCIE_CLIENT_GENERAL_CONTROL); + +	rockchip->pci.ep.ops = &rockchip_pcie_ep_ops; +	rockchip->pci.ep.page_size = SZ_64K; + +	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + +	ret = dw_pcie_ep_init(&rockchip->pci.ep); +	if (ret) { +		dev_err(dev, "failed to initialize endpoint\n"); +		return ret; +	} + +	ret = dw_pcie_ep_init_registers(&rockchip->pci.ep); +	if (ret) { +		dev_err(dev, "failed to initialize DWC endpoint registers\n"); +		dw_pcie_ep_deinit(&rockchip->pci.ep); +		return ret; +	} + +	pci_epc_init_notify(rockchip->pci.ep.epc); + +	/* unmask DLL up/down indicator and hot reset/link-down reset */ +	rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC); + +	return ret; +} +  static int rockchip_pcie_probe(struct platform_device *pdev)  {  	struct device *dev = &pdev->dev;  	struct rockchip_pcie *rockchip; -	struct dw_pcie_rp *pp; +	const struct rockchip_pcie_of_data *data;  	int ret; +	data = of_device_get_match_data(dev); +	if (!data) +		return -EINVAL; +  	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);  	if (!rockchip)  		return -ENOMEM; @@ -299,9 +515,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)  	rockchip->pci.dev = dev;  	rockchip->pci.ops = &dw_pcie_ops; - -	pp = &rockchip->pci.pp; -	pp->ops = &rockchip_pcie_host_ops; +	rockchip->data = data;  	ret = rockchip_pcie_resource_get(pdev, rockchip);  	if (ret) @@ -320,10 +534,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)  		
rockchip->vpcie3v3 = NULL;  	} else {  		ret = regulator_enable(rockchip->vpcie3v3); -		if (ret) { -			dev_err(dev, "failed to enable vpcie3v3 regulator\n"); -			return ret; -		} +		if (ret) +			return dev_err_probe(dev, ret, +					     "failed to enable vpcie3v3 regulator\n");  	}  	ret = rockchip_pcie_phy_init(rockchip); @@ -338,10 +551,26 @@ static int rockchip_pcie_probe(struct platform_device *pdev)  	if (ret)  		goto deinit_phy; -	ret = dw_pcie_host_init(pp); -	if (!ret) -		return 0; +	switch (data->mode) { +	case DW_PCIE_RC_TYPE: +		ret = rockchip_pcie_configure_rc(rockchip); +		if (ret) +			goto deinit_clk; +		break; +	case DW_PCIE_EP_TYPE: +		ret = rockchip_pcie_configure_ep(pdev, rockchip); +		if (ret) +			goto deinit_clk; +		break; +	default: +		dev_err(dev, "INVALID device type %d\n", data->mode); +		ret = -EINVAL; +		goto deinit_clk; +	} + +	return 0; +deinit_clk:  	clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);  deinit_phy:  	rockchip_pcie_phy_deinit(rockchip); @@ -352,8 +581,33 @@ disable_regulator:  	return ret;  } +static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = { +	.mode = DW_PCIE_RC_TYPE, +}; + +static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = { +	.mode = DW_PCIE_EP_TYPE, +	.epc_features = &rockchip_pcie_epc_features_rk3568, +}; + +static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = { +	.mode = DW_PCIE_EP_TYPE, +	.epc_features = &rockchip_pcie_epc_features_rk3588, +}; +  static const struct of_device_id rockchip_pcie_of_match[] = { -	{ .compatible = "rockchip,rk3568-pcie", }, +	{ +		.compatible = "rockchip,rk3568-pcie", +		.data = &rockchip_pcie_rc_of_data_rk3568, +	}, +	{ +		.compatible = "rockchip,rk3568-pcie-ep", +		.data = &rockchip_pcie_ep_of_data_rk3568, +	}, +	{ +		.compatible = "rockchip,rk3588-pcie-ep", +		.data = &rockchip_pcie_ep_of_data_rk3588, +	},  	{},  }; diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index 98bbc83182b4..278205db60a2 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -442,7 +442,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)  			return ret;  		} -		dw_pcie_ep_init_notify(&pci->ep); +		pci_epc_init_notify(pci->ep.epc);  		break;  	default: diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index d5523f302102..0a29136491b8 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -12,12 +12,10 @@  #include <linux/compiler.h>  #include <linux/delay.h>  #include <linux/err.h> -#include <linux/gpio.h>  #include <linux/gpio/consumer.h>  #include <linux/interrupt.h>  #include <linux/mfd/syscon.h>  #include <linux/of.h> -#include <linux/of_gpio.h>  #include <linux/of_pci.h>  #include <linux/phy/phy.h>  #include <linux/pci.h> @@ -78,16 +76,16 @@ struct kirin_pcie {  	void		*phy_priv;	/* only for PCIE_KIRIN_INTERNAL_PHY */  	/* DWC PERST# */ -	int		gpio_id_dwc_perst; +	struct gpio_desc *id_dwc_perst_gpio;  	/* Per-slot PERST# */  	int		num_slots; -	int		gpio_id_reset[MAX_PCI_SLOTS]; +	struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];  	const char	*reset_names[MAX_PCI_SLOTS];  	/* Per-slot clkreq */  	int		n_gpio_clkreq; -	int		gpio_id_clkreq[MAX_PCI_SLOTS]; +	struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];  	const char	*clkreq_names[MAX_PCI_SLOTS];  }; @@ -381,15 +379,20 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,  	
pcie->n_gpio_clkreq = ret;  	for (i = 0; i < pcie->n_gpio_clkreq; i++) { -		pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node, -						    "hisilicon,clken-gpios", i); -		if (pcie->gpio_id_clkreq[i] < 0) -			return pcie->gpio_id_clkreq[i]; +		pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev, +							"hisilicon,clken", i, +							GPIOD_OUT_LOW); +		if (IS_ERR(pcie->id_clkreq_gpio[i])) +			return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]), +					     "unable to get a valid clken gpio\n");  		pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,  						       "pcie_clkreq_%d", i);  		if (!pcie->clkreq_names[i])  			return -ENOMEM; + +		gpiod_set_consumer_name(pcie->id_clkreq_gpio[i], +					pcie->clkreq_names[i]);  	}  	return 0; @@ -400,29 +403,33 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,  				 struct device_node *node)  {  	struct device *dev = &pdev->dev; -	struct device_node *parent, *child;  	int ret, slot, i; -	for_each_available_child_of_node(node, parent) { -		for_each_available_child_of_node(parent, child) { +	for_each_available_child_of_node_scoped(node, parent) { +		for_each_available_child_of_node_scoped(parent, child) {  			i = pcie->num_slots; -			pcie->gpio_id_reset[i] = of_get_named_gpio(child, -							"reset-gpios", 0); -			if (pcie->gpio_id_reset[i] < 0) -				continue; +			pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev, +							 of_fwnode_handle(child), +							 "reset", 0, GPIOD_OUT_LOW, +							 NULL); +			if (IS_ERR(pcie->id_reset_gpio[i])) { +				if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT) +					continue; +				return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]), +						     "unable to get a valid reset gpio\n"); +			}  			pcie->num_slots++;  			if (pcie->num_slots > MAX_PCI_SLOTS) {  				dev_err(dev, "Too many PCI slots!\n"); -				ret = -EINVAL; -				goto put_node; +				return -EINVAL;  			}  			ret = of_pci_get_devfn(child);  			if (ret < 0) {  				dev_err(dev, "failed to parse devfn: %d\n", ret); -				goto put_node; +				return ret;  			}  			slot = PCI_SLOT(ret); @@ -430,19 +437,15 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,  			pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,  							      "pcie_perst_%d",  							      slot); -			if (!pcie->reset_names[i]) { -				ret = -ENOMEM; -				goto put_node; -			} +			if (!pcie->reset_names[i]) +				return -ENOMEM; + +			gpiod_set_consumer_name(pcie->id_reset_gpio[i], +						pcie->reset_names[i]);  		}  	}  	return 0; - -put_node: -	of_node_put(child); -	of_node_put(parent); -	return ret;  }  static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, @@ -463,14 +466,11 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,  		return PTR_ERR(kirin_pcie->apb);  	/* pcie internal PERST# gpio */ -	kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node, -							  "reset-gpios", 0); -	if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) { -		return -EPROBE_DEFER; -	} else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) { -		dev_err(dev, "unable to get a valid gpio pin\n"); -		return -ENODEV; -	} +	kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); +	if (IS_ERR(kirin_pcie->id_dwc_perst_gpio)) +		return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio), +				     "unable to get a valid gpio pin\n"); +	gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");  	ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);  	if (ret) @@ -553,7 +553,7 @@ static int 
kirin_pcie_add_bus(struct pci_bus *bus)  	/* Send PERST# to each slot */  	for (i = 0; i < kirin_pcie->num_slots; i++) { -		ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1); +		ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);  		if (ret) {  			dev_err(pci->dev, "PERST# %s error: %d\n",  				kirin_pcie->reset_names[i], ret); @@ -623,44 +623,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)  	return 0;  } -static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie, -				   struct device *dev) -{ -	int ret, i; - -	for (i = 0; i < kirin_pcie->num_slots; i++) { -		if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) { -			dev_err(dev, "unable to get a valid %s gpio\n", -				kirin_pcie->reset_names[i]); -			return -ENODEV; -		} - -		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i], -					kirin_pcie->reset_names[i]); -		if (ret) -			return ret; -	} - -	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) { -		if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) { -			dev_err(dev, "unable to get a valid %s gpio\n", -				kirin_pcie->clkreq_names[i]); -			return -ENODEV; -		} - -		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i], -					kirin_pcie->clkreq_names[i]); -		if (ret) -			return ret; - -		ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0); -		if (ret) -			return ret; -	} - -	return 0; -} -  static const struct dw_pcie_ops kirin_dw_pcie_ops = {  	.read_dbi = kirin_pcie_read_dbi,  	.write_dbi = kirin_pcie_write_dbi, @@ -680,7 +642,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)  		return hi3660_pcie_phy_power_off(kirin_pcie);  	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) -		gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1); +		gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);  	phy_power_off(kirin_pcie->phy);  	phy_exit(kirin_pcie->phy); @@ -707,10 +669,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,  		if (IS_ERR(kirin_pcie->phy))  			return PTR_ERR(kirin_pcie->phy); -		ret = kirin_pcie_gpio_request(kirin_pcie, dev); -		if (ret) -			return ret; -  		ret = phy_init(kirin_pcie->phy);  		if (ret)  			goto err; @@ -723,11 +681,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,  	/* perst assert Endpoint */  	usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); -	if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) { -		ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1); -		if (ret) -			goto err; -	} +	ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1); +	if (ret) +		goto err;  	usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 2fb8c15e7a91..236229f66c80 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -47,6 +47,7 @@  #define PARF_DBI_BASE_ADDR_HI			0x354  #define PARF_SLV_ADDR_SPACE_SIZE		0x358  #define PARF_SLV_ADDR_SPACE_SIZE_HI		0x35c +#define PARF_NO_SNOOP_OVERIDE			0x3d4  #define PARF_ATU_BASE_ADDR			0x634  #define PARF_ATU_BASE_ADDR_HI			0x638  #define PARF_SRIS_MODE				0x644 @@ -86,6 +87,10 @@  #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)  #define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3) +/* PARF_NO_SNOOP_OVERIDE register fields */ +#define WR_NO_SNOOP_OVERIDE_EN                 BIT(1) +#define RD_NO_SNOOP_OVERIDE_EN                 BIT(3) +  /* PARF_DEVICE_TYPE register fields */  #define PARF_DEVICE_TYPE_EP			0x0 @@ -150,6 +155,16 @@ enum 
qcom_pcie_ep_link_status {  };  /** + * struct qcom_pcie_ep_cfg - Per SoC config struct + * @hdma_support: HDMA support on this SoC + * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping + */ +struct qcom_pcie_ep_cfg { +	bool hdma_support; +	bool override_no_snoop; +}; + +/**   * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller   * @pci: Designware PCIe controller struct   * @parf: Qualcomm PCIe specific PARF register base @@ -167,6 +182,7 @@ enum qcom_pcie_ep_link_status {   * @num_clks: PCIe clocks count   * @perst_en: Flag for PERST enable   * @perst_sep_en: Flag for PERST separation enable + * @cfg: PCIe EP config struct   * @link_status: PCIe Link status   * @global_irq: Qualcomm PCIe specific Global IRQ   * @perst_irq: PERST# IRQ @@ -194,6 +210,7 @@ struct qcom_pcie_ep {  	u32 perst_en;  	u32 perst_sep_en; +	const struct qcom_pcie_ep_cfg *cfg;  	enum qcom_pcie_ep_link_status link_status;  	int global_irq;  	int perst_irq; @@ -482,13 +499,17 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)  	val &= ~PARF_MSTR_AXI_CLK_EN;  	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL); -	dw_pcie_ep_init_notify(&pcie_ep->pci.ep); +	pci_epc_init_notify(pcie_ep->pci.ep.epc);  	/* Enable LTSSM */  	val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);  	val |= BIT(8);  	writel_relaxed(val, pcie_ep->parf + PARF_LTSSM); +	if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop) +		writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, +				pcie_ep->parf + PARF_NO_SNOOP_OVERIDE); +  	return 0;  err_disable_resources: @@ -500,13 +521,8 @@ err_disable_resources:  static void qcom_pcie_perst_assert(struct dw_pcie *pci)  {  	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); -	struct device *dev = pci->dev; - -	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) { -		dev_dbg(dev, "Link is already disabled\n"); -		return; -	} +	pci_epc_deinit_notify(pci->ep.epc);  	dw_pcie_ep_cleanup(&pci->ep);  	qcom_pcie_disable_resources(pcie_ep);  	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED; @@ -640,12 +656,12 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)  	if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {  		dev_dbg(dev, "Received Linkdown event\n");  		pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN; -		pci_epc_linkdown(pci->ep.epc); +		dw_pcie_ep_linkdown(&pci->ep);  	} else if (FIELD_GET(PARF_INT_ALL_BME, status)) { -		dev_dbg(dev, "Received BME event. Link is enabled!\n"); +		dev_dbg(dev, "Received Bus Master Enable event\n");  		pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;  		qcom_pcie_ep_icc_update(pcie_ep); -		pci_epc_bme_notify(pci->ep.epc); +		pci_epc_bus_master_enable_notify(pci->ep.epc);  	} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {  		dev_dbg(dev, "Received PM Turn-off event! 
Entering L23\n");  		val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); @@ -816,6 +832,14 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)  	pcie_ep->pci.ops = &pci_ops;  	pcie_ep->pci.ep.ops = &pci_ep_ops;  	pcie_ep->pci.edma.nr_irqs = 1; + +	pcie_ep->cfg = of_device_get_match_data(dev); +	if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) { +		pcie_ep->pci.edma.ll_wr_cnt = 8; +		pcie_ep->pci.edma.ll_rd_cnt = 8; +		pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE; +	} +  	platform_set_drvdata(pdev, pcie_ep);  	ret = qcom_pcie_ep_get_resources(pdev, pcie_ep); @@ -874,7 +898,13 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)  	qcom_pcie_disable_resources(pcie_ep);  } +static const struct qcom_pcie_ep_cfg cfg_1_34_0 = { +	.hdma_support = true, +	.override_no_snoop = true, +}; +  static const struct of_device_id qcom_pcie_ep_match[] = { +	{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},  	{ .compatible = "qcom,sdx55-pcie-ep", },  	{ .compatible = "qcom,sm8450-pcie-ep", },  	{ } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 14772edcf0d3..0180edf3310e 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -18,10 +18,11 @@  #include <linux/io.h>  #include <linux/iopoll.h>  #include <linux/kernel.h> +#include <linux/limits.h>  #include <linux/init.h>  #include <linux/of.h> -#include <linux/of_gpio.h>  #include <linux/pci.h> +#include <linux/pm_opp.h>  #include <linux/pm_runtime.h>  #include <linux/platform_device.h>  #include <linux/phy/pcie.h> @@ -30,6 +31,7 @@  #include <linux/reset.h>  #include <linux/slab.h>  #include <linux/types.h> +#include <linux/units.h>  #include "../../pci.h"  #include "pcie-designware.h" @@ -51,6 +53,7 @@  #define PARF_SID_OFFSET				0x234  #define PARF_BDF_TRANSLATE_CFG			0x24c  #define PARF_SLV_ADDR_SPACE_SIZE		0x358 +#define PARF_NO_SNOOP_OVERIDE			0x3d4  #define PARF_DEVICE_TYPE			0x1000  #define PARF_BDF_TO_SID_TABLE_N			0x2000  #define PARF_BDF_TO_SID_CFG			0x2c00 @@ -118,6 +121,10 @@  /* PARF_LTSSM register fields */  #define LTSSM_EN				BIT(8) +/* PARF_NO_SNOOP_OVERIDE register fields */ +#define WR_NO_SNOOP_OVERIDE_EN			BIT(1) +#define RD_NO_SNOOP_OVERIDE_EN			BIT(3) +  /* PARF_DEVICE_TYPE register fields */  #define DEVICE_TYPE_RC				0x4 @@ -154,58 +161,56 @@  #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \  		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed])) -#define QCOM_PCIE_1_0_0_MAX_CLOCKS		4  struct qcom_pcie_resources_1_0_0 { -	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS]; +	struct clk_bulk_data *clks; +	int num_clks;  	struct reset_control *core;  	struct regulator *vdda;  }; -#define QCOM_PCIE_2_1_0_MAX_CLOCKS		5  #define QCOM_PCIE_2_1_0_MAX_RESETS		6  #define QCOM_PCIE_2_1_0_MAX_SUPPLY		3  struct qcom_pcie_resources_2_1_0 { -	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; +	struct clk_bulk_data *clks; +	int num_clks;  	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];  	int num_resets;  	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];  }; -#define QCOM_PCIE_2_3_2_MAX_CLOCKS		4  #define QCOM_PCIE_2_3_2_MAX_SUPPLY		2  struct qcom_pcie_resources_2_3_2 { -	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS]; +	struct clk_bulk_data *clks; +	int num_clks;  	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];  }; -#define QCOM_PCIE_2_3_3_MAX_CLOCKS		5  #define QCOM_PCIE_2_3_3_MAX_RESETS		7  struct qcom_pcie_resources_2_3_3 { -	struct clk_bulk_data 
clks[QCOM_PCIE_2_3_3_MAX_CLOCKS]; +	struct clk_bulk_data *clks; +	int num_clks;  	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];  }; -#define QCOM_PCIE_2_4_0_MAX_CLOCKS		4  #define QCOM_PCIE_2_4_0_MAX_RESETS		12  struct qcom_pcie_resources_2_4_0 { -	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; +	struct clk_bulk_data *clks;  	int num_clks;  	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];  	int num_resets;  }; -#define QCOM_PCIE_2_7_0_MAX_CLOCKS		15  #define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2  struct qcom_pcie_resources_2_7_0 { -	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS]; +	struct clk_bulk_data *clks;  	int num_clks;  	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];  	struct reset_control *rst;  }; -#define QCOM_PCIE_2_9_0_MAX_CLOCKS		5  struct qcom_pcie_resources_2_9_0 { -	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS]; +	struct clk_bulk_data *clks; +	int num_clks;  	struct reset_control *rst;  }; @@ -231,8 +236,15 @@ struct qcom_pcie_ops {  	int (*config_sid)(struct qcom_pcie *pcie);  }; + /** +  * struct qcom_pcie_cfg - Per SoC config struct +  * @ops: qcom PCIe ops structure +  * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache +  * snooping +  */  struct qcom_pcie_cfg {  	const struct qcom_pcie_ops *ops; +	bool override_no_snoop;  	bool no_l0s;  }; @@ -245,6 +257,7 @@ struct qcom_pcie {  	struct phy *phy;  	struct gpio_desc *reset;  	struct icc_path *icc_mem; +	struct icc_path *icc_cpu;  	const struct qcom_pcie_cfg *cfg;  	struct dentry *debugfs;  	bool suspended; @@ -337,21 +350,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)  	if (ret)  		return ret; -	res->clks[0].id = "iface"; -	res->clks[1].id = "core"; -	res->clks[2].id = "phy"; -	res->clks[3].id = "aux"; -	res->clks[4].id = "ref"; - -	/* iface, core, phy are required */ -	ret = devm_clk_bulk_get(dev, 3, res->clks); -	if (ret < 0) -		return ret; - -	/* aux, ref are optional */ -	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	res->resets[0].id = "pci";  	res->resets[1].id = "axi"; @@ -373,7 +376,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  	reset_control_bulk_assert(res->num_resets, res->resets);  	writel(1, pcie->parf + PARF_PHY_CTRL); @@ -425,7 +428,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)  	val &= ~PHY_TEST_PWR_DOWN;  	writel(val, pcie->parf + PARF_PHY_CTRL); -	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret)  		return ret; @@ -476,20 +479,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	int ret;  	res->vdda = devm_regulator_get(dev, "vdda");  	if (IS_ERR(res->vdda))  		return PTR_ERR(res->vdda); -	res->clks[0].id = "iface"; -	res->clks[1].id = "aux"; -	res->clks[2].id = "master_bus"; -	res->clks[3].id = "slave_bus"; - -	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); -	if (ret < 0) -		return ret; +	res->num_clks = 
devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	res->core = devm_reset_control_get_exclusive(dev, "core");  	return PTR_ERR_OR_ZERO(res->core); @@ -500,7 +499,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;  	reset_control_assert(res->core); -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  	regulator_disable(res->vdda);  } @@ -517,7 +516,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)  		return ret;  	} -	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret) {  		dev_err(dev, "cannot prepare/enable clocks\n");  		goto err_assert_reset; @@ -532,7 +531,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)  	return 0;  err_disable_clks: -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  err_assert_reset:  	reset_control_assert(res->core); @@ -580,14 +579,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)  	if (ret)  		return ret; -	res->clks[0].id = "aux"; -	res->clks[1].id = "cfg"; -	res->clks[2].id = "bus_master"; -	res->clks[3].id = "bus_slave"; - -	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	return 0;  } @@ -596,7 +592,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } @@ -613,7 +609,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)  		return ret;  	} -	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret) {  		dev_err(dev, "cannot prepare/enable clocks\n");  		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); @@ -661,17 +657,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)  	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");  	int ret; -	res->clks[0].id = "aux"; -	res->clks[1].id = "master_bus"; -	res->clks[2].id = "slave_bus"; -	res->clks[3].id = "iface"; - -	/* qcom,pcie-ipq4019 is defined without "iface" */ -	res->num_clks = is_ipq ? 
3 : 4; - -	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	res->resets[0].id = "axi_m";  	res->resets[1].id = "axi_s"; @@ -742,15 +732,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)  	struct device *dev = pci->dev;  	int ret; -	res->clks[0].id = "iface"; -	res->clks[1].id = "axi_m"; -	res->clks[2].id = "axi_s"; -	res->clks[3].id = "ahb"; -	res->clks[4].id = "aux"; - -	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	res->rst[0].id = "axi_m";  	res->rst[1].id = "axi_s"; @@ -771,7 +757,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  }  static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) @@ -801,7 +787,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	 */  	usleep_range(2000, 2500); -	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret) {  		dev_err(dev, "cannot prepare/enable clocks\n");  		goto err_assert_resets; @@ -862,8 +848,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	unsigned int num_clks, num_opt_clks; -	unsigned int idx;  	int ret;  	res->rst = devm_reset_control_array_get_exclusive(dev); @@ -877,36 +861,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)  	if (ret)  		return ret; -	idx = 0; -	res->clks[idx++].id = "aux"; -	res->clks[idx++].id = "cfg"; -	res->clks[idx++].id = "bus_master"; -	res->clks[idx++].id = "bus_slave"; -	res->clks[idx++].id = "slave_q2a"; - -	num_clks = idx; - -	ret = devm_clk_bulk_get(dev, num_clks, res->clks); -	if (ret < 0) -		return ret; - -	res->clks[idx++].id = "tbu"; -	res->clks[idx++].id = "ddrss_sf_tbu"; -	res->clks[idx++].id = "aggre0"; -	res->clks[idx++].id = "aggre1"; -	res->clks[idx++].id = "noc_aggr"; -	res->clks[idx++].id = "noc_aggr_4"; -	res->clks[idx++].id = "noc_aggr_south_sf"; -	res->clks[idx++].id = "cnoc_qx"; -	res->clks[idx++].id = "sleep"; -	res->clks[idx++].id = "cnoc_sf_axi"; - -	num_opt_clks = idx - num_clks; -	res->num_clks = idx; - -	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	return 0;  } @@ -986,6 +945,12 @@ err_disable_regulators:  static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)  { +	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; + +	if (pcie_cfg->override_no_snoop) +		writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, +				pcie->parf + PARF_NO_SNOOP_OVERIDE); +  	qcom_pcie_clear_aspm_l0s(pcie->pci);  	qcom_pcie_clear_hpc(pcie->pci); @@ -1101,17 +1066,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;  	struct dw_pcie *pci = 
pcie->pci;  	struct device *dev = pci->dev; -	int ret; -	res->clks[0].id = "iface"; -	res->clks[1].id = "axi_m"; -	res->clks[2].id = "axi_s"; -	res->clks[3].id = "axi_bridge"; -	res->clks[4].id = "rchng"; - -	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); -	if (ret < 0) -		return ret; +	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); +	if (res->num_clks < 0) { +		dev_err(dev, "Failed to get clocks\n"); +		return res->num_clks; +	}  	res->rst = devm_reset_control_array_get_exclusive(dev);  	if (IS_ERR(res->rst)) @@ -1124,7 +1084,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; -	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +	clk_bulk_disable_unprepare(res->num_clks, res->clks);  }  static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) @@ -1153,7 +1113,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)  	usleep_range(2000, 2500); -	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	return clk_bulk_prepare_enable(res->num_clks, res->clks);  }  static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) @@ -1366,6 +1326,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {  	.ops = &ops_1_9_0,  }; +static const struct qcom_pcie_cfg cfg_1_34_0 = { +	.ops = &ops_1_9_0, +	.override_no_snoop = true, +}; +  static const struct qcom_pcie_cfg cfg_2_1_0 = {  	.ops = &ops_2_1_0,  }; @@ -1409,6 +1374,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)  	if (IS_ERR(pcie->icc_mem))  		return PTR_ERR(pcie->icc_mem); +	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie"); +	if (IS_ERR(pcie->icc_cpu)) +		return PTR_ERR(pcie->icc_cpu);  	/*  	 * Some Qualcomm platforms require interconnect bandwidth constraints  	 * to be set before enabling interconnect clocks. @@ -1418,23 +1386,35 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)  	 */  	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));  	if (ret) { -		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", +		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", +			ret); +		return ret; +	} + +	/* +	 * Since the CPU-PCIe path is only used for activities like register +	 * access of the host controller and endpoint Config/BAR space access, +	 * HW team has recommended to use a minimal bandwidth of 1KBps just to +	 * keep the path active. 
+	 */ +	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1)); +	if (ret) { +		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",  			ret); +		icc_set_bw(pcie->icc_mem, 0, 0);  		return ret;  	}  	return 0;  } -static void qcom_pcie_icc_update(struct qcom_pcie *pcie) +static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)  { +	u32 offset, status, width, speed;  	struct dw_pcie *pci = pcie->pci; -	u32 offset, status; -	int speed, width; -	int ret; - -	if (!pcie->icc_mem) -		return; +	unsigned long freq_kbps; +	struct dev_pm_opp *opp; +	int ret, freq_mbps;  	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); @@ -1446,10 +1426,28 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)  	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);  	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); -	ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed)); -	if (ret) { -		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", -			ret); +	if (pcie->icc_mem) { +		ret = icc_set_bw(pcie->icc_mem, 0, +				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed)); +		if (ret) { +			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", +				ret); +		} +	} else { +		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]); +		if (freq_mbps < 0) +			return; + +		freq_kbps = freq_mbps * KILO; +		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, +						 true); +		if (!IS_ERR(opp)) { +			ret = dev_pm_opp_set_opp(pci->dev, opp); +			if (ret) +				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n", +					freq_kbps * width, ret); +			dev_pm_opp_put(opp); +		}  	}  } @@ -1493,7 +1491,9 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)  static int qcom_pcie_probe(struct platform_device *pdev)  {  	const struct qcom_pcie_cfg *pcie_cfg; +	unsigned long max_freq = ULONG_MAX;  	struct device *dev = &pdev->dev; +	struct dev_pm_opp *opp;  	struct qcom_pcie *pcie;  	struct dw_pcie_rp *pp;  	struct resource *res; @@ -1561,9 +1561,43 @@ static int qcom_pcie_probe(struct platform_device *pdev)  		goto err_pm_runtime_put;  	} -	ret = qcom_pcie_icc_init(pcie); -	if (ret) +	/* OPP table is optional */ +	ret = devm_pm_opp_of_add_table(dev); +	if (ret && ret != -ENODEV) { +		dev_err_probe(dev, ret, "Failed to add OPP table\n");  		goto err_pm_runtime_put; +	} + +	/* +	 * Before the PCIe link is initialized, vote for highest OPP in the OPP +	 * table, so that we are voting for maximum voltage corner for the +	 * link to come up in maximum supported speed. At the end of the +	 * probe(), OPP will be updated using qcom_pcie_icc_opp_update(). 
+	 */ +	if (!ret) { +		opp = dev_pm_opp_find_freq_floor(dev, &max_freq); +		if (IS_ERR(opp)) { +			ret = PTR_ERR(opp); +			dev_err_probe(pci->dev, ret, +				      "Unable to find max freq OPP\n"); +			goto err_pm_runtime_put; +		} else { +			ret = dev_pm_opp_set_opp(dev, opp); +		} + +		dev_pm_opp_put(opp); +		if (ret) { +			dev_err_probe(pci->dev, ret, +				      "Failed to set OPP for freq %lu\n", +				      max_freq); +			goto err_pm_runtime_put; +		} +	} else { +		/* Skip ICC init if OPP is supported as it is handled by OPP */ +		ret = qcom_pcie_icc_init(pcie); +		if (ret) +			goto err_pm_runtime_put; +	}  	ret = pcie->cfg->ops->get_resources(pcie);  	if (ret) @@ -1583,7 +1617,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)  		goto err_phy_exit;  	} -	qcom_pcie_icc_update(pcie); +	qcom_pcie_icc_opp_update(pcie);  	if (pcie->mhi)  		qcom_pcie_init_debugfs(pcie); @@ -1602,16 +1636,20 @@ err_pm_runtime_put:  static int qcom_pcie_suspend_noirq(struct device *dev)  {  	struct qcom_pcie *pcie = dev_get_drvdata(dev); -	int ret; +	int ret = 0;  	/*  	 * Set minimum bandwidth required to keep data path functional during  	 * suspend.  	 */ -	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); -	if (ret) { -		dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret); -		return ret; +	if (pcie->icc_mem) { +		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); +		if (ret) { +			dev_err(dev, +				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", +				ret); +			return ret; +		}  	}  	/* @@ -1634,7 +1672,21 @@ static int qcom_pcie_suspend_noirq(struct device *dev)  		pcie->suspended = true;  	} -	return 0; +	/* +	 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM. +	 * Because on some platforms, DBI access can happen very late during the +	 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC +	 * error. 
+	 */ +	if (pm_suspend_target_state != PM_SUSPEND_MEM) { +		ret = icc_disable(pcie->icc_cpu); +		if (ret) +			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret); + +		if (!pcie->icc_mem) +			dev_pm_opp_set_opp(pcie->pci->dev, NULL); +	} +	return ret;  }  static int qcom_pcie_resume_noirq(struct device *dev) @@ -1642,6 +1694,14 @@ static int qcom_pcie_resume_noirq(struct device *dev)  	struct qcom_pcie *pcie = dev_get_drvdata(dev);  	int ret; +	if (pm_suspend_target_state != PM_SUSPEND_MEM) { +		ret = icc_enable(pcie->icc_cpu); +		if (ret) { +			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret); +			return ret; +		} +	} +  	if (pcie->suspended) {  		ret = qcom_pcie_host_init(&pcie->pci->pp);  		if (ret) @@ -1650,7 +1710,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)  		pcie->suspended = false;  	} -	qcom_pcie_icc_update(pcie); +	qcom_pcie_icc_opp_update(pcie);  	return 0;  } @@ -1667,7 +1727,7 @@ static const struct of_device_id qcom_pcie_match[] = {  	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },  	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },  	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, -	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0}, +	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},  	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },  	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp }, diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index cfeccc2f9ee1..f0f3ebd1a033 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -2,11 +2,17 @@  /*   * PCIe controller driver for Renesas R-Car Gen4 Series SoCs   * Copyright (C) 2022-2023 Renesas Electronics Corporation + * + * The r8a779g0 (R-Car V4H) controller requires a specific firmware to be + * provided, to initialize the PHY. Otherwise, the PCIe controller will not + * work.   
*/  #include <linux/delay.h> +#include <linux/firmware.h>  #include <linux/interrupt.h>  #include <linux/io.h> +#include <linux/iopoll.h>  #include <linux/module.h>  #include <linux/of.h>  #include <linux/pci.h> @@ -20,9 +26,10 @@  /* Renesas-specific */  /* PCIe Mode Setting Register 0 */  #define PCIEMSR0		0x0000 -#define BIFUR_MOD_SET_ON	BIT(0) +#define APP_SRIS_MODE		BIT(6)  #define DEVICE_TYPE_EP		0  #define DEVICE_TYPE_RC		BIT(4) +#define BIFUR_MOD_SET_ON	BIT(0)  /* PCIe Interrupt Status 0 */  #define PCIEINTSTS0		0x0084 @@ -37,47 +44,49 @@  #define PCIEDMAINTSTSEN		0x0314  #define PCIEDMAINTSTSEN_INIT	GENMASK(15, 0) +/* Port Logic Registers 89 */ +#define PRTLGC89		0x0b70 + +/* Port Logic Registers 90 */ +#define PRTLGC90		0x0b74 +  /* PCIe Reset Control Register 1 */  #define PCIERSTCTRL1		0x0014  #define APP_HOLD_PHY_RST	BIT(16)  #define APP_LTSSM_ENABLE	BIT(0) +/* PCIe Power Management Control */ +#define PCIEPWRMNGCTRL		0x0070 +#define APP_CLK_REQ_N		BIT(11) +#define APP_CLK_PM_EN		BIT(10) +  #define RCAR_NUM_SPEED_CHANGE_RETRIES	10  #define RCAR_MAX_LINK_SPEED		4  #define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET	0x1000  #define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET	0x800 +#define RCAR_GEN4_PCIE_FIRMWARE_NAME		"rcar_gen4_pcie.bin" +#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR	0xc000 +MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME); + +struct rcar_gen4_pcie; +struct rcar_gen4_pcie_drvdata { +	void (*additional_common_init)(struct rcar_gen4_pcie *rcar); +	int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable); +	enum dw_pcie_device_mode mode; +}; +  struct rcar_gen4_pcie {  	struct dw_pcie dw;  	void __iomem *base; +	void __iomem *phy_base;  	struct platform_device *pdev; -	enum dw_pcie_device_mode mode; +	const struct rcar_gen4_pcie_drvdata *drvdata;  };  #define to_rcar_gen4_pcie(_dw)	container_of(_dw, struct rcar_gen4_pcie, dw)  /* Common */ -static void rcar_gen4_pcie_ltssm_enable(struct rcar_gen4_pcie *rcar, -					bool enable) -{ -	u32 val; - -	val = readl(rcar->base + PCIERSTCTRL1); -	if (enable) { -		val |= APP_LTSSM_ENABLE; -		val &= ~APP_HOLD_PHY_RST; -	} else { -		/* -		 * Since the datasheet of R-Car doesn't mention how to assert -		 * the APP_HOLD_PHY_RST, don't assert it again. Otherwise, -		 * hang-up issue happened in the dw_edma_core_off() when -		 * the controller didn't detect a PCI device. -		 */ -		val &= ~APP_LTSSM_ENABLE; -	} -	writel(val, rcar->base + PCIERSTCTRL1); -} -  static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)  {  	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw); @@ -123,9 +132,13 @@ static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)  static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)  {  	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw); -	int i, changes; +	int i, changes, ret; -	rcar_gen4_pcie_ltssm_enable(rcar, true); +	if (rcar->drvdata->ltssm_control) { +		ret = rcar->drvdata->ltssm_control(rcar, true); +		if (ret) +			return ret; +	}  	/*  	 * Require direct speed change with retrying here if the link_gen is @@ -137,7 +150,7 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)  	 * Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.  	 * So, this needs remaining times for up to PCIe Gen4 if RC mode.  	 
*/ -	if (changes && rcar->mode == DW_PCIE_RC_TYPE) +	if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)  		changes--;  	for (i = 0; i < changes; i++) { @@ -153,7 +166,8 @@ static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)  {  	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw); -	rcar_gen4_pcie_ltssm_enable(rcar, false); +	if (rcar->drvdata->ltssm_control) +		rcar->drvdata->ltssm_control(rcar, false);  }  static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar) @@ -172,9 +186,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)  		reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);  	val = readl(rcar->base + PCIEMSR0); -	if (rcar->mode == DW_PCIE_RC_TYPE) { +	if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {  		val |= DEVICE_TYPE_RC; -	} else if (rcar->mode == DW_PCIE_EP_TYPE) { +	} else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {  		val |= DEVICE_TYPE_EP;  	} else {  		ret = -EINVAL; @@ -190,6 +204,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)  	if (ret)  		goto err_unprepare; +	if (rcar->drvdata->additional_common_init) +		rcar->drvdata->additional_common_init(rcar); +  	return 0;  err_unprepare: @@ -231,6 +248,10 @@ static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)  static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)  { +	rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy"); +	if (IS_ERR(rcar->phy_base)) +		return PTR_ERR(rcar->phy_base); +  	/* Renesas-specific registers */  	rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app"); @@ -255,7 +276,7 @@ static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)  	rcar->dw.ops = &dw_pcie_ops;  	rcar->dw.dev = dev;  	rcar->pdev = pdev; -	dw_pcie_cap_set(&rcar->dw, EDMA_UNROLL); +	rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;  	dw_pcie_cap_set(&rcar->dw, REQ_RES);  	platform_set_drvdata(pdev, rcar); @@ -437,7 +458,7 @@ static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)  		rcar_gen4_pcie_ep_deinit(rcar);  	} -	dw_pcie_ep_init_notify(ep); +	pci_epc_init_notify(ep->epc);  	return ret;  } @@ -451,9 +472,11 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)  /* Common */  static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)  { -	rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev); +	rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev); +	if (!rcar->drvdata) +		return -EINVAL; -	switch (rcar->mode) { +	switch (rcar->drvdata->mode) {  	case DW_PCIE_RC_TYPE:  		return rcar_gen4_add_dw_pcie_rp(rcar);  	case DW_PCIE_EP_TYPE: @@ -494,7 +517,7 @@ err_unprepare:  static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)  { -	switch (rcar->mode) { +	switch (rcar->drvdata->mode) {  	case DW_PCIE_RC_TYPE:  		rcar_gen4_remove_dw_pcie_rp(rcar);  		break; @@ -514,14 +537,227 @@ static void rcar_gen4_pcie_remove(struct platform_device *pdev)  	rcar_gen4_pcie_unprepare(rcar);  } +static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable) +{ +	u32 val; + +	val = readl(rcar->base + PCIERSTCTRL1); +	if (enable) { +		val |= APP_LTSSM_ENABLE; +		val &= ~APP_HOLD_PHY_RST; +	} else { +		/* +		 * Since the datasheet of R-Car doesn't mention how to assert +		 * the APP_HOLD_PHY_RST, don't assert it again. Otherwise, +		 * hang-up issue happened in the dw_edma_core_off() when +		 * the controller didn't detect a PCI device. 
+		 */ +		val &= ~APP_LTSSM_ENABLE; +	} +	writel(val, rcar->base + PCIERSTCTRL1); + +	return 0; +} + +static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar) +{ +	struct dw_pcie *dw = &rcar->dw; +	u32 val; + +	val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW); +	val &= ~PORT_LANE_SKEW_INSERT_MASK; +	if (dw->num_lanes < 4) +		val |= BIT(6); +	dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val); + +	val = readl(rcar->base + PCIEPWRMNGCTRL); +	val |= APP_CLK_REQ_N | APP_CLK_PM_EN; +	writel(val, rcar->base + PCIEPWRMNGCTRL); +} + +static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar, +					       u32 offset, u32 mask, u32 val) +{ +	u32 tmp; + +	tmp = readl(rcar->phy_base + offset); +	tmp &= ~mask; +	tmp |= val; +	writel(tmp, rcar->phy_base + offset); +} + +/* + * SoC datasheet suggests checking port logic register bits during firmware + * write. If read returns non-zero value, then this function returns -EAGAIN + * indicating that the write needs to be done again. If read returns zero, + * then return 0 to indicate success. + */ +static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar, +				       u32 offset, u32 mask) +{ +	struct dw_pcie *dw = &rcar->dw; + +	if (dw_pcie_readl_dbi(dw, offset) & mask) +		return -EAGAIN; + +	return 0; +} + +static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar) +{ +	/* The check_addr values are magical numbers in the datasheet */ +	const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121}; +	struct dw_pcie *dw = &rcar->dw; +	const struct firmware *fw; +	unsigned int i, timeout; +	u32 data; +	int ret; + +	ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev); +	if (ret) { +		dev_err(dw->dev, "Failed to load firmware (%s): %d\n", +			RCAR_GEN4_PCIE_FIRMWARE_NAME, ret); +		return ret; +	} + +	for (i = 0; i < (fw->size / 2); i++) { +		data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2]; +		timeout = 100; +		do { +			dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i); +			dw_pcie_writel_dbi(dw, PRTLGC90, data); +			if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30))) +				break; +			if (!(--timeout)) { +				ret = -ETIMEDOUT; +				goto exit; +			} +			usleep_range(100, 200); +		} while (1); +	} + +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17)); + +	for (i = 0; i < ARRAY_SIZE(check_addr); i++) { +		timeout = 100; +		do { +			dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]); +			ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)); +			ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0)); +			if (!ret) +				break; +			if (!(--timeout)) { +				ret = -ETIMEDOUT; +				goto exit; +			} +			usleep_range(100, 200); +		} while (1); +	} + +exit: +	release_firmware(fw); + +	return ret; +} + +static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable) +{ +	struct dw_pcie *dw = &rcar->dw; +	u32 val; +	int ret; + +	if (!enable) { +		val = readl(rcar->base + PCIERSTCTRL1); +		val &= ~APP_LTSSM_ENABLE; +		writel(val, rcar->base + PCIERSTCTRL1); + +		return 0; +	} + +	val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE); +	val |= PORT_FORCE_DO_DESKEW_FOR_SRIS; +	dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val); + +	val = readl(rcar->base + PCIEMSR0); +	val |= APP_SRIS_MODE; +	writel(val, rcar->base + PCIEMSR0); + +	/* +	 * The R-Car Gen4 datasheet doesn't describe the PHY registers' name. +	 * But, the initialization procedure describes these offsets. So, +	 * this driver has magical offset numbers. 
+	 */ +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0); + +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26)); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0); +	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19)); + +	val = readl(rcar->base + PCIERSTCTRL1); +	val &= ~APP_HOLD_PHY_RST; +	writel(val, rcar->base + PCIERSTCTRL1); + +	ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000); +	if (ret < 0) +		return ret; + +	ret = rcar_gen4_pcie_download_phy_firmware(rcar); +	if (ret) +		return ret; + +	val = readl(rcar->base + PCIERSTCTRL1); +	val |= APP_LTSSM_ENABLE; +	writel(val, rcar->base + PCIERSTCTRL1); + +	return 0; +} + +static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = { +	.ltssm_control = r8a779f0_pcie_ltssm_control, +	.mode = DW_PCIE_RC_TYPE, +}; + +static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = { +	.ltssm_control = r8a779f0_pcie_ltssm_control, +	.mode = DW_PCIE_EP_TYPE, +}; + +static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = { +	.additional_common_init = rcar_gen4_pcie_additional_common_init, +	.ltssm_control = rcar_gen4_pcie_ltssm_control, +	.mode = DW_PCIE_RC_TYPE, +}; + +static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = { +	.additional_common_init = rcar_gen4_pcie_additional_common_init, +	.ltssm_control = rcar_gen4_pcie_ltssm_control, +	.mode = DW_PCIE_EP_TYPE, +}; +  static const struct of_device_id rcar_gen4_pcie_of_match[] = {  	{ +		.compatible = "renesas,r8a779f0-pcie", +		.data = &drvdata_r8a779f0_pcie, +	}, +	{ +		.compatible = "renesas,r8a779f0-pcie-ep", +		.data = &drvdata_r8a779f0_pcie_ep, +	}, +	{  		.compatible = "renesas,rcar-gen4-pcie", -		.data = (void *)DW_PCIE_RC_TYPE, +		.data = &drvdata_rcar_gen4_pcie,  	},  	{  		.compatible = "renesas,rcar-gen4-pcie-ep", -		.data = (void *)DW_PCIE_EP_TYPE, +		.data = &drvdata_rcar_gen4_pcie_ep,  	},  	{},  }; diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 93f5433c5c55..4bf7b433417a 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -13,7 +13,6 @@  #include <linux/clk.h>  #include <linux/debugfs.h>  #include <linux/delay.h> -#include <linux/gpio.h>  #include <linux/gpio/consumer.h>  #include <linux/interconnect.h>  #include <linux/interrupt.h> @@ -21,7 +20,6 @@  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/of.h> -#include <linux/of_gpio.h>  #include <linux/of_pci.h>  #include <linux/pci.h>  #include <linux/phy/phy.h> @@ -308,10 +306,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)  	return readl_relaxed(pcie->appl_base + reg);  } -struct tegra_pcie_soc { -	enum dw_pcie_device_mode mode; -}; -  static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)  {  	struct dw_pcie *pci = &pcie->pci; @@ -1715,6 +1709,7 @@ static void 
pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)  	if (ret)  		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); +	pci_epc_deinit_notify(pcie->pci.ep.epc);  	dw_pcie_ep_cleanup(&pcie->pci.ep);  	reset_control_assert(pcie->core_rst); @@ -1903,7 +1898,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)  		goto fail_init_complete;  	} -	dw_pcie_ep_init_notify(ep); +	pci_epc_init_notify(ep->epc);  	/* Program the private control to allow sending LTR upstream */  	if (pcie->of_data->has_ltr_req_fix) { @@ -2015,6 +2010,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {  	.bar[BAR_3] = { .type = BAR_RESERVED, },  	.bar[BAR_4] = { .type = BAR_RESERVED, },  	.bar[BAR_5] = { .type = BAR_RESERVED, }, +	.align = SZ_64K,  };  static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c index a2b844268e28..d6e73811216e 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -410,7 +410,7 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)  		return ret;  	} -	dw_pcie_ep_init_notify(&priv->pci.ep); +	pci_epc_init_notify(priv->pci.ep.epc);  	return 0;  } |
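
The clock handling in this series (the Rockchip driver and most of the Qualcomm resource helpers) converges on devm_clk_bulk_get_all(), which discovers every clock from the device node instead of hard-coding a name table in the driver. A minimal sketch of that pattern follows; the my_pcie structure and helper names are illustrative assumptions, not code from the patch.

	#include <linux/clk.h>
	#include <linux/device.h>

	/* Illustrative driver state; not a structure from the patch above. */
	struct my_pcie {
		struct clk_bulk_data *clks;
		int num_clks;
	};

	static int my_pcie_clk_init(struct device *dev, struct my_pcie *pcie)
	{
		int ret;

		/* Grab every clock listed for the device node; no name table needed. */
		ret = devm_clk_bulk_get_all(dev, &pcie->clks);
		if (ret < 0)
			return dev_err_probe(dev, ret, "failed to get clocks\n");

		pcie->num_clks = ret;

		ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
		if (ret)
			return dev_err_probe(dev, ret, "failed to enable clocks\n");

		return 0;
	}

	static void my_pcie_clk_exit(struct my_pcie *pcie)
	{
		clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
	}

The count returned by devm_clk_bulk_get_all() replaces the old ARRAY_SIZE()-based bookkeeping, which is why the per-variant MAX_CLOCKS defines disappear in the Qualcomm hunks.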
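
Several endpoint drivers touched above (keembay, qcom-ep, rcar-gen4, tegra194, uniphier-ep, and the new Rockchip endpoint mode) now notify bound EPF drivers with pci_epc_init_notify() once the DWC registers have been programmed. A condensed sketch of that bring-up order, under the assumption of a generic helper name that is not part of the patch:

	#include <linux/pci-epc.h>
	#include "pcie-designware.h"

	/* Sketch of the endpoint bring-up order used across the drivers above. */
	static int my_pcie_ep_setup(struct dw_pcie_ep *ep)
	{
		int ret;

		/* Parse resources and create the EPC device. */
		ret = dw_pcie_ep_init(ep);
		if (ret)
			return ret;

		/* Program DBI/iATU defaults once the core is clocked and out of reset. */
		ret = dw_pcie_ep_init_registers(ep);
		if (ret) {
			dw_pcie_ep_deinit(ep);
			return ret;
		}

		/* Let waiting EPF drivers bind now that the controller is ready. */
		pci_epc_init_notify(ep->epc);

		return 0;
	}

The matching teardown side is visible in the qcom-ep and tegra194 hunks, where pci_epc_deinit_notify() is called before dw_pcie_ep_cleanup() when PERST# is asserted.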
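
The Kirin changes drop the legacy integer-GPIO API (of_get_named_gpio(), gpio_request(), gpio_direction_output()) in favour of GPIO descriptors, which bundle the request, initial state and consumer name into one call. A small sketch of descriptor-based PERST# handling, with hypothetical helper names:

	#include <linux/gpio/consumer.h>
	#include <linux/device.h>

	/* Request PERST# as a descriptor, driven low (deasserted) at probe time. */
	static struct gpio_desc *my_pcie_get_perst(struct device *dev)
	{
		struct gpio_desc *perst;

		perst = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(perst))
			return perst;

		gpiod_set_consumer_name(perst, "pcie_perst_bridge");
		return perst;
	}

	/*
	 * Later, release the endpoint from reset. The raw variant mirrors the
	 * old gpio_direction_output(..., 1) behaviour without polarity lookup.
	 */
	static int my_pcie_release_perst(struct gpio_desc *perst)
	{
		return gpiod_direction_output_raw(perst, 1);
	}

Because devm_gpiod_get() both requests and configures the line, the separate kirin_pcie_gpio_request() helper removed above is no longer needed.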
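
For the Qualcomm host driver, bandwidth scaling can now go through an optional OPP table instead of direct interconnect votes: probe votes for the highest OPP before link training, and the post-link update selects the OPP matching the negotiated speed and width. A compressed sketch of the probe-time vote, assuming an optional OPP table; the helper name is an illustration, not the patch's function:

	#include <linux/pm_opp.h>
	#include <linux/device.h>
	#include <linux/limits.h>

	/* Vote for the highest available OPP so the link can train at full speed. */
	static int my_pcie_vote_max_opp(struct device *dev)
	{
		unsigned long freq = ULONG_MAX;
		struct dev_pm_opp *opp;
		int ret;

		ret = devm_pm_opp_of_add_table(dev);
		if (ret == -ENODEV)
			return 0;	/* no OPP table: fall back to icc votes */
		if (ret)
			return ret;

		/* Highest entry not above ULONG_MAX, i.e. the table maximum. */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		ret = dev_pm_opp_set_opp(dev, opp);
		dev_pm_opp_put(opp);

		return ret;
	}

Once the link is up, qcom_pcie_icc_opp_update() above recomputes the operating point from PCI_EXP_LNKSTA, so the maximum vote only lasts for the duration of probe.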