Diffstat (limited to 'drivers/pci')
103 files changed, 8092 insertions, 3505 deletions
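
Several hunks below (notably in pci-imx6.c) replace open-coded register poll loops with regmap_read_poll_timeout(). A minimal sketch of that pattern, under assumptions: the regmap, the register offset (PLL_LOCKED_REG) and the lock bit (PLL_LOCKED) are hypothetical stand-ins for the driver's real IOMUXC_GPR22 / IMX7D_GPR22_PCIE_PHY_PLL_LOCKED names:

#include <linux/bits.h>
#include <linux/regmap.h>

#define PLL_LOCKED_REG	0x58		/* hypothetical register offset */
#define PLL_LOCKED	BIT(31)		/* hypothetical lock-status bit */

static int wait_for_pll_lock(struct regmap *map)
{
	u32 val;

	/*
	 * Poll PLL_LOCKED_REG roughly every 200us until the PLL_LOCKED
	 * bit is set: returns 0 on success, -ETIMEDOUT after 400ms, or
	 * the underlying regmap_read() error.
	 */
	return regmap_read_poll_timeout(map, PLL_LOCKED_REG, val,
					val & PLL_LOCKED, 200, 400000);
}

Because the macro folds the retry counter, the usleep_range() call and the timeout detection into one call, the pci-imx6.c hunk can delete PHY_PLL_LOCK_WAIT_MAX_RETRIES and the hand-rolled loop around regmap_read().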
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 2ab92409210a..a304f5ea11b9 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -52,7 +52,7 @@ config PCI_MSI If you don't know what to do here, say Y. config PCI_MSI_IRQ_DOMAIN - def_bool ARC || ARM || ARM64 || X86 + def_bool ARC || ARM || ARM64 || X86 || RISCV depends on PCI_MSI select GENERIC_MSI_IRQ_DOMAIN @@ -170,7 +170,7 @@ config PCI_P2PDMA Many PCIe root complexes do not support P2P transactions and it's hard to tell which support it at all, so at this time, - P2P DMA transations must be between devices behind the same root + P2P DMA transactions must be between devices behind the same root port. If unsure, say N. @@ -181,7 +181,8 @@ config PCI_LABEL config PCI_HYPERV tristate "Hyper-V PCI Frontend" - depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS + select PCI_HYPERV_INTERFACE help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 657d642fcc67..28cdd8c0213a 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ ifdef CONFIG_PCI obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSFS) += slot.o -obj-$(CONFIG_OF) += of.o obj-$(CONFIG_ACPI) += pci-acpi.o endif +obj-$(CONFIG_OF) += of.o obj-$(CONFIG_PCI_QUIRKS) += quirks.o obj-$(CONFIG_PCIEPORTBUS) += pcie/ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 544922f097c0..2fccb5762c76 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -336,15 +336,6 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static bool pcie_downstream_port(const struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - - return type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_DOWNSTREAM || - type == PCI_EXP_TYPE_PCIE_BRIDGE; -} - bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 97c08146534a..e18499243f84 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required); * @pdev: PCI device structure * * Returns negative value when PASID capability is not present. - * Otherwise it returns the numer of supported PASIDs. + * Otherwise it returns the number of supported PASIDs. 
*/ int pci_max_pasids(struct pci_dev *pdev) { diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5cb40b2518f9..8e40b3e6da77 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -23,7 +23,7 @@ void pci_add_resource_offset(struct list_head *resources, struct resource *res, entry = resource_list_create_entry(res, 0); if (!entry) { - printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); + pr_err("PCI: can't add host bridge window %pR\n", res); return; } @@ -288,8 +288,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) res->end = end; res->flags &= ~IORESOURCE_UNSET; orig_res.flags &= ~IORESOURCE_UNSET; - pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n", - &orig_res, res); + pci_info(dev, "%pR clipped to %pR\n", &orig_res, res); return true; } @@ -418,11 +417,9 @@ struct pci_bus *pci_bus_get(struct pci_bus *bus) get_device(&bus->dev); return bus; } -EXPORT_SYMBOL(pci_bus_get); void pci_bus_put(struct pci_bus *bus) { if (bus) put_device(&bus->dev); } -EXPORT_SYMBOL(pci_bus_put); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 6012f3059acd..70e078238899 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -174,14 +174,14 @@ config PCIE_IPROC_MSI PCIe controller config PCIE_ALTERA - bool "Altera PCIe controller" + tristate "Altera PCIe controller" depends on ARM || NIOS2 || ARM64 || COMPILE_TEST help Say Y here if you want to enable PCIe controller support on Altera FPGA. config PCIE_ALTERA_MSI - bool "Altera PCIe MSI feature" + tristate "Altera PCIe MSI feature" depends on PCIE_ALTERA depends on PCI_MSI_IRQ_DOMAIN help @@ -267,6 +267,7 @@ config PCIE_TANGO_SMP8759 config VMD depends on PCI_MSI && X86_64 && SRCU + select X86_DEV_DMA_OPS tristate "Intel Volume Management Device Driver" ---help--- Adds support for the Intel Volume Management Device (VMD). VMD is a @@ -280,5 +281,12 @@ config VMD To compile this driver as a module, choose M here: the module will be called vmd. +config PCI_HYPERV_INTERFACE + tristate "Hyper-V PCI Interface" + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + help + The Hyper-V PCI Interface is a helper driver allows other drivers to + have a common interface with the Hyper-V PCI frontend driver. + source "drivers/pci/controller/dwc/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index d56a507495c5..a2a22c9d91af 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o +obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 6ea74b1c0d94..0ba988b5b5bc 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -90,7 +90,7 @@ config PCI_EXYNOS config PCI_IMX6 bool "Freescale i.MX6/7/8 PCIe controller" - depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST + depends on ARCH_MXC || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST @@ -103,24 +103,57 @@ config PCIE_SPEAR13XX Say Y here if you want PCIe support on SPEAr13XX SoCs. 
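
The bus.c hunk above removes the EXPORT_SYMBOLs from pci_bus_get() and pci_bus_put(), leaving these reference-count helpers usable by built-in PCI code only. A minimal sketch of the balanced get/put pattern they implement; walk_bus_children() is a hypothetical caller, not code from this patch:

#include <linux/pci.h>

static void walk_bus_children(struct pci_dev *dev)
{
	/* Hold a reference so the bus cannot go away while we use it */
	struct pci_bus *bus = pci_bus_get(dev->bus);

	if (!bus)
		return;

	/* ... inspect bus->children, bus->devices, etc. ... */

	pci_bus_put(bus);	/* drop the reference taken above */
}

pci_bus_get() tolerates a NULL argument and returns the bus it pinned, so the get/put calls can bracket the use of the pointer symmetrically, mirroring the get_device()/put_device() pair shown in the hunk.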
config PCI_KEYSTONE - bool "TI Keystone PCIe controller" - depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) + bool + +config PCI_KEYSTONE_HOST + bool "PCI Keystone Host Mode" + depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST + select PCI_KEYSTONE + default y help - Say Y here if you want to enable PCI controller support on Keystone - SoCs. The PCI controller on Keystone is based on DesignWare hardware - and therefore the driver re-uses the DesignWare core functions to - implement the driver. + Enables support for the PCIe controller in the Keystone SoC to + work in host mode. The PCI controller on Keystone is based on + DesignWare hardware and therefore the driver re-uses the + DesignWare core functions to implement the driver. + +config PCI_KEYSTONE_EP + bool "PCI Keystone Endpoint Mode" + depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) + depends on PCI_ENDPOINT + select PCIE_DW_EP + select PCI_KEYSTONE + help + Enables support for the PCIe controller in the Keystone SoC to + work in endpoint mode. The PCI controller on Keystone is based + on DesignWare hardware and therefore the driver re-uses the + DesignWare core functions to implement the driver. config PCI_LAYERSCAPE - bool "Freescale Layerscape PCIe controller" + bool "Freescale Layerscape PCIe controller - Host mode" depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) depends on PCI_MSI_IRQ_DOMAIN select MFD_SYSCON select PCIE_DW_HOST help - Say Y here if you want PCIe controller support on Layerscape SoCs. + Say Y here if you want to enable PCIe controller support on Layerscape + SoCs to work in Host mode. + This controller can work either as EP or RC. The RCW[HOST_AGT_PEX] + determines which PCIe controller works in EP mode and which PCIe + controller works in RC mode. + +config PCI_LAYERSCAPE_EP + bool "Freescale Layerscape PCIe controller - Endpoint mode" + depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) + depends on PCI_ENDPOINT + select PCIE_DW_EP + help + Say Y here if you want to enable PCIe controller support on Layerscape + SoCs to work in Endpoint mode. + This controller can work either as EP or RC. The RCW[HOST_AGT_PEX] + determines which PCIe controller works in EP mode and which PCIe + controller works in RC mode. config PCI_HISI depends on OF && (ARM64 || COMPILE_TEST) @@ -203,6 +236,16 @@ config PCI_MESON and therefore the driver re-uses the DesignWare core functions to implement the driver. +config PCIE_TEGRA194 + tristate "NVIDIA Tegra194 (and later) PCIe controller" + depends on ARCH_TEGRA_194_SOC || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + select PHY_TEGRA194_P2U + help + Say Y here if you want support for DesignWare core based PCIe host + controller found in NVIDIA Tegra194 SoC. + config PCIE_UNIPHIER bool "Socionext UniPhier PCIe controllers" depends on ARCH_UNIPHIER || COMPILE_TEST @@ -213,4 +256,16 @@ config PCIE_UNIPHIER Say Y here if you want PCIe controller support on UniPhier SoCs. This driver supports LD20 and PXs3 SoCs. +config PCIE_AL + bool "Amazon Annapurna Labs PCIe controller" + depends on OF && (ARM64 || COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here to enable support of the Amazon's Annapurna Labs PCIe + controller IP on Amazon SoCs. The PCIe controller uses the DesignWare + core plus Annapurna Labs proprietary hardware wrappers. This is + required only for DT-based platforms. 
ACPI platforms with the + Annapurna Labs PCIe controller don't need to enable this. + endmenu diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index b5f3b83cc2b3..69faff371f11 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -8,13 +8,15 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o +obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o obj-$(CONFIG_PCI_MESON) += pci-meson.o +obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o # The following drivers are for devices that use the generic ACPI @@ -28,5 +30,6 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o # depending on whether ACPI, the DT driver, or both are enabled. ifdef CONFIG_PCI +obj-$(CONFIG_ARM64) += pcie-al.o obj-$(CONFIG_ARM64) += pcie-hisi.o endif diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index ae84a69ae63a..4234ddb4722f 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -26,6 +26,7 @@ #include <linux/types.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> +#include <linux/gpio/consumer.h> #include "../../pci.h" #include "pcie-designware.h" @@ -247,6 +248,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, pp); + of_node_put(pcie_intc_node); if (!dra7xx->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; @@ -406,7 +408,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep) return &dra7xx_pcie_epc_features; } -static struct dw_pcie_ep_ops pcie_ep_ops = { +static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dra7xx_pcie_ep_init, .raise_irq = dra7xx_pcie_raise_irq, .get_features = dra7xx_pcie_get_features, diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index cee5f2f590e2..14a6ba4067fb 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) ep->phy = devm_of_phy_get(dev, np, NULL); if (IS_ERR(ep->phy)) { - if (PTR_ERR(ep->phy) == -EPROBE_DEFER) + if (PTR_ERR(ep->phy) != -ENODEV) return PTR_ERR(ep->phy); ep->phy = NULL; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 3d627f94a166..acfbd34032a8 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -52,10 +52,12 @@ enum imx6_pcie_variants { #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) +#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) struct imx6_pcie_drvdata { enum imx6_pcie_variants variant; u32 flags; + int dbi_length; }; struct imx6_pcie { @@ -89,9 +91,8 @@ struct imx6_pcie { }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ -#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 -#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 #define 
PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 +#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) /* PCIe Root Complex registers (memory-mapped) */ #define PCIE_RC_IMX6_MSI_CAP 0x50 @@ -104,34 +105,29 @@ struct imx6_pcie { /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 -#define PCIE_PL_PFLR (PL_OFFSET + 0x08) -#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) -#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) -#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) -#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) -#define PCIE_PHY_CTRL_DATA_LOC 0 -#define PCIE_PHY_CTRL_CAP_ADR_LOC 16 -#define PCIE_PHY_CTRL_CAP_DAT_LOC 17 -#define PCIE_PHY_CTRL_WR_LOC 18 -#define PCIE_PHY_CTRL_RD_LOC 19 +#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) +#define PCIE_PHY_CTRL_CAP_ADR BIT(16) +#define PCIE_PHY_CTRL_CAP_DAT BIT(17) +#define PCIE_PHY_CTRL_WR BIT(18) +#define PCIE_PHY_CTRL_RD BIT(19) #define PCIE_PHY_STAT (PL_OFFSET + 0x110) -#define PCIE_PHY_STAT_ACK_LOC 16 +#define PCIE_PHY_STAT_ACK BIT(16) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C /* PHY registers (not memory-mapped) */ #define PCIE_PHY_ATEOVRD 0x10 -#define PCIE_PHY_ATEOVRD_EN (0x1 << 2) +#define PCIE_PHY_ATEOVRD_EN BIT(2) #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f -#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9) +#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) #define PCIE_PHY_RX_ASIC_OUT 0x100D #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) @@ -154,19 +150,19 @@ struct imx6_pcie { #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC #define PHY_RX_OVRD_IN_LO 0x1005 -#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) -#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) +#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) +#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) -static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) +static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) { struct dw_pcie *pci = imx6_pcie->pci; - u32 val; + bool val; u32 max_iterations = 10; u32 wait_counter = 0; do { - val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); - val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; + val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & + PCIE_PHY_STAT_ACK; wait_counter++; if (val == exp_val) @@ -184,27 +180,27 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) u32 val; int ret; - val = addr << PCIE_PHY_CTRL_DATA_LOC; + val = PCIE_PHY_CTRL_DATA(addr); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); + val |= PCIE_PHY_CTRL_CAP_ADR; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - ret = pcie_phy_poll_ack(imx6_pcie, 1); + ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; - val = addr << PCIE_PHY_CTRL_DATA_LOC; + val = PCIE_PHY_CTRL_DATA(addr); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - return pcie_phy_poll_ack(imx6_pcie, 0); + return pcie_phy_poll_ack(imx6_pcie, false); } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ -static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) +static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) { struct dw_pcie *pci = imx6_pcie->pci; - u32 val, phy_ctl; + u32 phy_ctl; int ret; ret = pcie_phy_wait_ack(imx6_pcie, addr); @@ -212,23 +208,22 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) return ret; /* assert Read signal */ - 
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; + phy_ctl = PCIE_PHY_CTRL_RD; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); - ret = pcie_phy_poll_ack(imx6_pcie, 1); + ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; - val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); - *data = val & 0xffff; + *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); /* deassert Read signal */ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); - return pcie_phy_poll_ack(imx6_pcie, 0); + return pcie_phy_poll_ack(imx6_pcie, false); } -static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) +static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) { struct dw_pcie *pci = imx6_pcie->pci; u32 var; @@ -240,41 +235,41 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) if (ret) return ret; - var = data << PCIE_PHY_CTRL_DATA_LOC; + var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* capture data */ - var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); + var |= PCIE_PHY_CTRL_CAP_DAT; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - ret = pcie_phy_poll_ack(imx6_pcie, 1); + ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; /* deassert cap data */ - var = data << PCIE_PHY_CTRL_DATA_LOC; + var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, 0); + ret = pcie_phy_poll_ack(imx6_pcie, false); if (ret) return ret; /* assert wr signal */ - var = 0x1 << PCIE_PHY_CTRL_WR_LOC; + var = PCIE_PHY_CTRL_WR; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack */ - ret = pcie_phy_poll_ack(imx6_pcie, 1); + ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; /* deassert wr signal */ - var = data << PCIE_PHY_CTRL_DATA_LOC; + var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, 0); + ret = pcie_phy_poll_ack(imx6_pcie, false); if (ret) return ret; @@ -285,7 +280,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) { - u32 tmp; + u16 tmp; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) return; @@ -455,7 +450,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) * reset time is too short, cannot meet the requirement. * add one ~10us delay here. 
*/ - udelay(10); + usleep_range(10, 100); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); break; @@ -488,20 +483,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) { u32 val; - unsigned int retries; struct device *dev = imx6_pcie->pci->dev; - for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { - regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); - - if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) - return; - - usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, - PHY_PLL_LOCK_WAIT_USLEEP_MAX); - } - - dev_err(dev, "PCIe PLL lock timeout\n"); + if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, + IOMUXC_GPR22, val, + val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, + PHY_PLL_LOCK_WAIT_USLEEP_MAX, + PHY_PLL_LOCK_WAIT_TIMEOUT)) + dev_err(dev, "PCIe PLL lock timeout\n"); } static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) @@ -687,7 +676,7 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) { unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); int mult, div; - u32 val; + u16 val; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) return 0; @@ -730,21 +719,6 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) return 0; } -static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - - /* check if the link is up or not */ - if (!dw_pcie_wait_for_link(pci)) - return 0; - - dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); - return -ETIMEDOUT; -} - static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; @@ -761,7 +735,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) } dev_err(dev, "Speed change timeout\n"); - return -EINVAL; + return -ETIMEDOUT; } static void imx6_pcie_ltssm_enable(struct device *dev) @@ -803,7 +777,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) /* Start LTSSM. */ imx6_pcie_ltssm_enable(dev); - ret = imx6_pcie_wait_for_link(imx6_pcie); + ret = dw_pcie_wait_for_link(pci); if (ret) goto err_reset_phy; @@ -841,7 +815,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) } /* Make sure link training is finished as well! 
*/ - ret = imx6_pcie_wait_for_link(imx6_pcie); + ret = dw_pcie_wait_for_link(pci); if (ret) { dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; @@ -856,8 +830,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) err_reset_phy: dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); + dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), + dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); imx6_pcie_reset_phy(imx6_pcie); return ret; } @@ -993,17 +967,11 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) } } -static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) -{ - return (imx6_pcie->drvdata->variant == IMX7D || - imx6_pcie->drvdata->variant == IMX6SX); -} - static int imx6_pcie_suspend_noirq(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - if (!imx6_pcie_supports_suspend(imx6_pcie)) + if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; imx6_pcie_pm_turnoff(imx6_pcie); @@ -1019,7 +987,7 @@ static int imx6_pcie_resume_noirq(struct device *dev) struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); struct pcie_port *pp = &imx6_pcie->pci->pp; - if (!imx6_pcie_supports_suspend(imx6_pcie)) + if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; imx6_pcie_assert_core_reset(imx6_pcie); @@ -1206,8 +1174,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); if (IS_ERR(imx6_pcie->vpcie)) { - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) + return PTR_ERR(imx6_pcie->vpcie); imx6_pcie->vpcie = NULL; } @@ -1245,11 +1213,13 @@ static const struct imx6_pcie_drvdata drvdata[] = { .variant = IMX6Q, .flags = IMX6_PCIE_FLAG_IMX6_PHY | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + .dbi_length = 0x200, }, [IMX6SX] = { .variant = IMX6SX, .flags = IMX6_PCIE_FLAG_IMX6_PHY | - IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | + IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, }, [IMX6QP] = { .variant = IMX6QP, @@ -1258,6 +1228,7 @@ static const struct imx6_pcie_drvdata drvdata[] = { }, [IMX7D] = { .variant = IMX7D, + .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, }, [IMX8MQ] = { .variant = IMX8MQ, @@ -1279,11 +1250,43 @@ static struct platform_driver imx6_pcie_driver = { .of_match_table = imx6_pcie_of_match, .suppress_bind_attrs = true, .pm = &imx6_pcie_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = imx6_pcie_probe, .shutdown = imx6_pcie_shutdown, }; +static void imx6_pcie_quirk(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + struct pcie_port *pp = bus->sysdata; + + /* Bus parent is the PCI bridge, its parent is this platform driver */ + if (!bus->dev.parent || !bus->dev.parent->parent) + return; + + /* Make sure we only quirk devices associated with this driver */ + if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) + return; + + if (bus->number == pp->root_bus_nr) { + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + + /* + * Limit config length to avoid the kernel reading beyond + * the register set and causing an abort on i.MX 6Quad + */ + if (imx6_pcie->drvdata->dbi_length) { + dev->cfg_size = imx6_pcie->drvdata->dbi_length; + dev_info(&dev->dev, "Limiting cfg_size to %d\n", + dev->cfg_size); + } + } +} +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, + 
PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk); + static int __init imx6_pcie_init(void) { #ifdef CONFIG_ARM diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 14f2b0b4ed5e..af677254a072 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -11,6 +11,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> @@ -18,6 +19,7 @@ #include <linux/mfd/syscon.h> #include <linux/msi.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/phy/phy.h> @@ -26,6 +28,7 @@ #include <linux/resource.h> #include <linux/signal.h> +#include "../../pci.h" #include "pcie-designware.h" #define PCIE_VENDORID_MASK 0xffff @@ -44,28 +47,34 @@ #define CFG_TYPE1 BIT(24) #define OB_SIZE 0x030 -#define SPACE0_REMOTE_CFG_OFFSET 0x1000 #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) #define OB_ENABLEN BIT(0) #define OB_WIN_SIZE 8 /* 8MB */ +#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1))) +#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1))) +#define PCIE_EP_IRQ_SET 0x64 +#define PCIE_EP_IRQ_CLR 0x68 +#define INT_ENABLE BIT(0) + /* IRQ register defines */ #define IRQ_EOI 0x050 -#define IRQ_STATUS 0x184 -#define IRQ_ENABLE_SET 0x188 -#define IRQ_ENABLE_CLR 0x18c #define MSI_IRQ 0x054 -#define MSI0_IRQ_STATUS 0x104 -#define MSI0_IRQ_ENABLE_SET 0x108 -#define MSI0_IRQ_ENABLE_CLR 0x10c -#define IRQ_STATUS 0x184 +#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4)) +#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4)) +#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4)) #define MSI_IRQ_OFFSET 4 +#define IRQ_STATUS(n) (0x184 + ((n) << 4)) +#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4)) +#define INTx_EN BIT(0) + #define ERR_IRQ_STATUS 0x1c4 #define ERR_IRQ_ENABLE_SET 0x1c8 #define ERR_AER BIT(5) /* ECRC error */ +#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */ #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ #define ERR_CORR BIT(3) /* Correctable error */ #define ERR_NONFATAL BIT(2) /* Non-fatal error */ @@ -74,25 +83,45 @@ #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ ERR_NONFATAL | ERR_FATAL | ERR_SYS) -#define MAX_MSI_HOST_IRQS 8 /* PCIE controller device IDs */ #define PCIE_RC_K2HK 0xb008 #define PCIE_RC_K2E 0xb009 #define PCIE_RC_K2L 0xb00a #define PCIE_RC_K2G 0xb00b +#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1) +#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1) + +#define EP 0x0 +#define LEG_EP 0x1 +#define RC 0x2 + +#define EXP_CAP_ID_OFFSET 0x70 + +#define KS_PCIE_SYSCLOCKOUTEN BIT(0) + +#define AM654_PCIE_DEV_TYPE_MASK 0x3 +#define AM654_WIN_SIZE SZ_64K + +#define APP_ADDR_SPACE_0 (16 * SZ_1K) + #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) +struct ks_pcie_of_data { + enum dw_pcie_device_mode mode; + const struct dw_pcie_host_ops *host_ops; + const struct dw_pcie_ep_ops *ep_ops; + unsigned int version; +}; + struct keystone_pcie { struct dw_pcie *pci; /* PCI Device ID */ u32 device_id; - int num_legacy_host_irqs; int legacy_host_irqs[PCI_NUM_INTX]; struct device_node *legacy_intc_np; - int num_msi_host_irqs; - int msi_host_irqs[MAX_MSI_HOST_IRQS]; + int msi_host_irq; int num_lanes; u32 num_viewport; struct phy **phy; @@ -101,28 +130,12 @@ struct keystone_pcie { struct irq_domain *legacy_irq_domain; struct device_node *np; - int error_irq; - /* Application 
register space */ void __iomem *va_app_base; /* DT 1st resource */ struct resource app; + bool is_am6; }; -static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, - u32 *bit_pos) -{ - *reg_offset = offset % 8; - *bit_pos = offset >> 3; -} - -static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - return ks_pcie->app.start + MSI_IRQ; -} - static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) { return readl(ks_pcie->va_app_base + offset); @@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset, writel(val, ks_pcie->va_app_base + offset); } -static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) +static void ks_pcie_msi_irq_ack(struct irq_data *data) { - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - u32 pending, vector; - int src, virq; + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct keystone_pcie *ks_pcie; + u32 irq = data->hwirq; + struct dw_pcie *pci; + u32 reg_offset; + u32 bit_pos; - pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); + pci = to_dw_pcie_from_pp(pp); + ks_pcie = to_keystone_pcie(pci); - /* - * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit - * shows 1, 9, 17, 25 and so forth - */ - for (src = 0; src < 4; src++) { - if (BIT(src) & pending) { - vector = offset + (src << 3); - virq = irq_linear_revmap(pp->irq_domain, vector); - dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", - src, vector, virq); - generic_handle_irq(virq); - } - } + reg_offset = irq % 8; + bit_pos = irq >> 3; + + ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset), + BIT(bit_pos)); + ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); } -static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp) +static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - u32 reg_offset, bit_pos; + struct pcie_port *pp = irq_data_get_irq_chip_data(data); struct keystone_pcie *ks_pcie; struct dw_pcie *pci; + u64 msi_target; pci = to_dw_pcie_from_pp(pp); ks_pcie = to_keystone_pcie(pci); - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), - BIT(bit_pos)); - ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); + msi_target = ks_pcie->app.start + MSI_IRQ; + msg->address_lo = lower_32_bits(msi_target); + msg->address_hi = upper_32_bits(msi_target); + msg->data = data->hwirq; + + dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); } -static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq) +static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) { - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + return -EINVAL; +} - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), +static void ks_pcie_msi_mask(struct irq_data *data) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct keystone_pcie *ks_pcie; + u32 irq = data->hwirq; + struct dw_pcie *pci; + unsigned long flags; + u32 reg_offset; + u32 bit_pos; + + raw_spin_lock_irqsave(&pp->lock, flags); + + pci = to_dw_pcie_from_pp(pp); + ks_pcie = 
to_keystone_pcie(pci); + + reg_offset = irq % 8; + bit_pos = irq >> 3; + + ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset), BIT(bit_pos)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); } -static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +static void ks_pcie_msi_unmask(struct irq_data *data) { - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct keystone_pcie *ks_pcie; + u32 irq = data->hwirq; + struct dw_pcie *pci; + unsigned long flags; + u32 reg_offset; + u32 bit_pos; + + raw_spin_lock_irqsave(&pp->lock, flags); - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), + pci = to_dw_pcie_from_pp(pp); + ks_pcie = to_keystone_pcie(pci); + + reg_offset = irq % 8; + bit_pos = irq >> 3; + + ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset), BIT(bit_pos)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); } +static struct irq_chip ks_pcie_msi_irq_chip = { + .name = "KEYSTONE-PCI-MSI", + .irq_ack = ks_pcie_msi_irq_ack, + .irq_compose_msi_msg = ks_pcie_compose_msi_msg, + .irq_set_affinity = ks_pcie_msi_set_affinity, + .irq_mask = ks_pcie_msi_mask, + .irq_unmask = ks_pcie_msi_unmask, +}; + static int ks_pcie_msi_host_init(struct pcie_port *pp) { + pp->msi_irq_chip = &ks_pcie_msi_irq_chip; return dw_pcie_allocate_domains(pp); } -static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) -{ - int i; - - for (i = 0; i < PCI_NUM_INTX; i++) - ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); -} - static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) { @@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, u32 pending; int virq; - pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); + pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset)); if (BIT(0) & pending) { virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); @@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); } +/* + * Dummy function so that DW core doesn't configure MSI + */ +static int ks_pcie_am654_msi_host_init(struct pcie_port *pp) +{ + return 0; +} + static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) { ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); @@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) if (reg & ERR_CORR) dev_dbg(dev, "Correctable Error\n"); - if (reg & ERR_AXI) + if (!ks_pcie->is_am6 && (reg & ERR_AXI)) dev_err(dev, "AXI tag lookup fatal Error\n"); - if (reg & ERR_AER) + if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER))) dev_err(dev, "ECRC Error\n"); ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); @@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); ks_pcie_clear_dbi_mode(ks_pcie); + if (ks_pcie->is_am6) + return; + val = ilog2(OB_WIN_SIZE); ks_pcie_app_writel(ks_pcie, OB_SIZE, val); @@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci) return (val == PORT_LOGIC_LTSSM_STATE_L0); } -static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) +static void ks_pcie_stop_link(struct dw_pcie *pci) { + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 val; /* Disable Link training */ val 
= ks_pcie_app_readl(ks_pcie, CMD_STATUS); val &= ~LTSSM_EN_VAL; ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); - - /* Initiate Link Training */ - val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); - ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); } -/** - * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware - * - * Ioremap the register resources, initialize legacy irq domain - * and call dw_pcie_v3_65_host_init() API to initialize the Keystone - * PCI host controller. - */ -static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie) +static int ks_pcie_start_link(struct dw_pcie *pci) { - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); struct device *dev = pci->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; - - /* Index 0 is the config reg. space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - /* - * We set these same and is used in pcie rd/wr_other_conf - * functions - */ - pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; - pp->va_cfg1_base = pp->va_cfg0_base; - - /* Index 1 is the application reg. space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ks_pcie->va_app_base = devm_ioremap_resource(dev, res); - if (IS_ERR(ks_pcie->va_app_base)) - return PTR_ERR(ks_pcie->va_app_base); - - ks_pcie->app = *res; + u32 val; - /* Create legacy IRQ domain */ - ks_pcie->legacy_irq_domain = - irq_domain_add_linear(ks_pcie->legacy_intc_np, - PCI_NUM_INTX, - &ks_pcie_legacy_irq_domain_ops, - NULL); - if (!ks_pcie->legacy_irq_domain) { - dev_err(dev, "Failed to add irq domain for legacy irqs\n"); - return -EINVAL; + if (dw_pcie_link_up(pci)) { + dev_dbg(dev, "link is already up\n"); + return 0; } - return dw_pcie_host_init(pp); + /* Initiate Link Training */ + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); + + return 0; } static void ks_pcie_quirk(struct pci_dev *dev) @@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); -static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct device *dev = pci->dev; - - if (dw_pcie_link_up(pci)) { - dev_info(dev, "Link already up\n"); - return 0; - } - - ks_pcie_initiate_link_train(ks_pcie); - - /* check if the link is up or not */ - if (!dw_pcie_wait_for_link(pci)) - return 0; - - dev_err(dev, "phy link never came up\n"); - return -ETIMEDOUT; -} - static void ks_pcie_msi_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); + unsigned int irq = desc->irq_data.hwirq; struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); - u32 offset = irq - ks_pcie->msi_host_irqs[0]; + u32 offset = irq - ks_pcie->msi_host_irq; struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; struct device *dev = pci->dev; struct irq_chip *chip = irq_desc_get_chip(desc); + u32 vector, virq, reg, pos; dev_dbg(dev, "%s, irq %d\n", __func__, irq); @@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) * ack operation. 
*/ chained_irq_enter(chip, desc); - ks_pcie_handle_msi_irq(ks_pcie, offset); + + reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset)); + /* + * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit + * shows 1, 9, 17, 25 and so forth + */ + for (pos = 0; pos < 4; pos++) { + if (!(reg & BIT(pos))) + continue; + + vector = offset + (pos << 3); + virq = irq_linear_revmap(pp->irq_domain, vector); + dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector, + virq); + generic_handle_irq(virq); + } + chained_irq_exit(chip, desc); } @@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, - char *controller, int *num_irqs) +static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie) { - int temp, max_host_irqs, legacy = 1, *host_irqs; struct device *dev = ks_pcie->pci->dev; - struct device_node *np_pcie = dev->of_node, **np_temp; - - if (!strcmp(controller, "msi-interrupt-controller")) - legacy = 0; + struct device_node *np = ks_pcie->np; + struct device_node *intc_np; + struct irq_data *irq_data; + int irq_count, irq, ret, i; - if (legacy) { - np_temp = &ks_pcie->legacy_intc_np; - max_host_irqs = PCI_NUM_INTX; - host_irqs = &ks_pcie->legacy_host_irqs[0]; - } else { - np_temp = &ks_pcie->msi_intc_np; - max_host_irqs = MAX_MSI_HOST_IRQS; - host_irqs = &ks_pcie->msi_host_irqs[0]; - } + if (!IS_ENABLED(CONFIG_PCI_MSI)) + return 0; - /* interrupt controller is in a child node */ - *np_temp = of_get_child_by_name(np_pcie, controller); - if (!(*np_temp)) { - dev_err(dev, "Node for %s is absent\n", controller); + intc_np = of_get_child_by_name(np, "msi-interrupt-controller"); + if (!intc_np) { + if (ks_pcie->is_am6) + return 0; + dev_warn(dev, "msi-interrupt-controller node is absent\n"); return -EINVAL; } - temp = of_irq_count(*np_temp); - if (!temp) { - dev_err(dev, "No IRQ entries in %s\n", controller); - of_node_put(*np_temp); - return -EINVAL; + irq_count = of_irq_count(intc_np); + if (!irq_count) { + dev_err(dev, "No IRQ entries in msi-interrupt-controller\n"); + ret = -EINVAL; + goto err; } - if (temp > max_host_irqs) - dev_warn(dev, "Too many %s interrupts defined %u\n", - (legacy ? "legacy" : "MSI"), temp); - - /* - * support upto max_host_irqs. 
In dt from index 0 to 3 (legacy) or 0 to - * 7 (MSI) - */ - for (temp = 0; temp < max_host_irqs; temp++) { - host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); - if (!host_irqs[temp]) - break; - } + for (i = 0; i < irq_count; i++) { + irq = irq_of_parse_and_map(intc_np, i); + if (!irq) { + ret = -EINVAL; + goto err; + } - of_node_put(*np_temp); + if (!ks_pcie->msi_host_irq) { + irq_data = irq_get_irq_data(irq); + if (!irq_data) { + ret = -EINVAL; + goto err; + } + ks_pcie->msi_host_irq = irq_data->hwirq; + } - if (temp) { - *num_irqs = temp; - return 0; + irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler, + ks_pcie); } - return -EINVAL; + of_node_put(intc_np); + return 0; + +err: + of_node_put(intc_np); + return ret; } -static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) +static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) { - int i; + struct device *dev = ks_pcie->pci->dev; + struct irq_domain *legacy_irq_domain; + struct device_node *np = ks_pcie->np; + struct device_node *intc_np; + int irq_count, irq, ret = 0, i; + + intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); + if (!intc_np) { + /* + * Since legacy interrupts are modeled as edge-interrupts in + * AM6, keep it disabled for now. + */ + if (ks_pcie->is_am6) + return 0; + dev_warn(dev, "legacy-interrupt-controller node is absent\n"); + return -EINVAL; + } - /* Legacy IRQ */ - for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { - irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], + irq_count = of_irq_count(intc_np); + if (!irq_count) { + dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n"); + ret = -EINVAL; + goto err; + } + + for (i = 0; i < irq_count; i++) { + irq = irq_of_parse_and_map(intc_np, i); + if (!irq) { + ret = -EINVAL; + goto err; + } + ks_pcie->legacy_host_irqs[i] = irq; + + irq_set_chained_handler_and_data(irq, ks_pcie_legacy_irq_handler, ks_pcie); } - ks_pcie_enable_legacy_irqs(ks_pcie); - /* MSI IRQ */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { - irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], - ks_pcie_msi_irq_handler, - ks_pcie); - } + legacy_irq_domain = + irq_domain_add_linear(intc_np, PCI_NUM_INTX, + &ks_pcie_legacy_irq_domain_ops, NULL); + if (!legacy_irq_domain) { + dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + ret = -EINVAL; + goto err; } + ks_pcie->legacy_irq_domain = legacy_irq_domain; + + for (i = 0; i < PCI_NUM_INTX; i++) + ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); - if (ks_pcie->error_irq > 0) - ks_pcie_enable_error_irq(ks_pcie); +err: + of_node_put(intc_np); + return ret; } +#ifdef CONFIG_ARM /* * When a PCI device does not exist during config cycles, keystone host gets a * bus error instead of returning 0xffffffff. 
This handler always returns 0 @@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } +#endif static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) { @@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) if (ret) return ret; + dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); + dw_pcie_dbi_ro_wr_dis(pci); return 0; } @@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); int ret; + ret = ks_pcie_config_legacy_irq(ks_pcie); + if (ret) + return ret; + + ret = ks_pcie_config_msi_irq(ks_pcie); + if (ret) + return ret; + dw_pcie_setup_rc(pp); - ks_pcie_establish_link(ks_pcie); + ks_pcie_stop_link(pci); ks_pcie_setup_rc_app_regs(ks_pcie); - ks_pcie_setup_interrupts(ks_pcie); writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), pci->dbi_base + PCI_IO_BASE); @@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) if (ret < 0) return ret; +#ifdef CONFIG_ARM /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); +#endif + + ks_pcie_start_link(pci); + dw_pcie_wait_for_link(pci); return 0; } @@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = { .rd_other_conf = ks_pcie_rd_other_conf, .wr_other_conf = ks_pcie_wr_other_conf, .host_init = ks_pcie_host_init, - .msi_set_irq = ks_pcie_msi_set_irq, - .msi_clear_irq = ks_pcie_msi_clear_irq, - .get_msi_addr = ks_pcie_get_msi_addr, .msi_host_init = ks_pcie_msi_host_init, - .msi_irq_ack = ks_pcie_msi_irq_ack, .scan_bus = ks_pcie_v3_65_scan_bus, }; +static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { + .host_init = ks_pcie_host_init, + .msi_host_init = ks_pcie_am654_msi_host_init, +}; + static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) { struct keystone_pcie *ks_pcie = priv; @@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, struct dw_pcie *pci = ks_pcie->pci; struct pcie_port *pp = &pci->pp; struct device *dev = &pdev->dev; + struct resource *res; int ret; - ret = ks_pcie_get_irq_controller_info(ks_pcie, - "legacy-interrupt-controller", - &ks_pcie->num_legacy_host_irqs); - if (ret) - return ret; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - ret = ks_pcie_get_irq_controller_info(ks_pcie, - "msi-interrupt-controller", - &ks_pcie->num_msi_host_irqs); - if (ret) - return ret; - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pp->va_cfg0_base)) + return PTR_ERR(pp->va_cfg0_base); - /* - * Index 0 is the platform interrupt for error interrupt - * from RC. This is optional. 
- */ - ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); - if (ks_pcie->error_irq <= 0) - dev_info(dev, "no error IRQ defined\n"); - else { - ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler, - IRQF_SHARED, "pcie-error-irq", ks_pcie); - if (ret < 0) { - dev_err(dev, "failed to request error IRQ %d\n", - ks_pcie->error_irq); - return ret; - } - } + pp->va_cfg1_base = pp->va_cfg0_base; - pp->ops = &ks_pcie_host_ops; - ret = ks_pcie_dw_host_init(ks_pcie); + ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; @@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, return 0; } -static const struct of_device_id ks_pcie_of_match[] = { - { - .type = "pci", - .compatible = "ti,keystone-pcie", - }, - { }, -}; +static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u32 val; + + ks_pcie_set_dbi_mode(ks_pcie); + dw_pcie_read(base + reg, size, &val); + ks_pcie_clear_dbi_mode(ks_pcie); + return val; +} + +static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + ks_pcie_set_dbi_mode(ks_pcie); + dw_pcie_write(base + reg, size, val); + ks_pcie_clear_dbi_mode(ks_pcie); +} static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { + .start_link = ks_pcie_start_link, + .stop_link = ks_pcie_stop_link, .link_up = ks_pcie_link_up, + .read_dbi2 = ks_pcie_am654_read_dbi2, + .write_dbi2 = ks_pcie_am654_write_dbi2, +}; + +static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + int flags; + + ep->page_size = AM654_WIN_SIZE; + flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; + dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); +} + +static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + u8 int_pin; + + int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN); + if (int_pin == 0 || int_pin > 4) + return; + + ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin), + INT_ENABLE); + ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE); + mdelay(1); + ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE); + ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin), + INT_ENABLE); +} + +static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + ks_pcie_am654_raise_legacy_irq(ks_pcie); + break; + case PCI_EPC_IRQ_MSI: + dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + break; + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + return -EINVAL; + } + + return 0; +} + +static const struct pci_epc_features ks_pcie_am654_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .reserved_bar = 1 << BAR_0 | 1 << BAR_1, + .bar_fixed_64bit = 1 << BAR_0, + .bar_fixed_size[2] = SZ_1M, + .bar_fixed_size[3] = SZ_64K, + .bar_fixed_size[4] = 256, + .bar_fixed_size[5] = SZ_1M, + .align = SZ_1M, }; +static const struct pci_epc_features* +ks_pcie_am654_get_features(struct dw_pcie_ep *ep) +{ + return &ks_pcie_am654_epc_features; +} + +static 
const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = { + .ep_init = ks_pcie_am654_ep_init, + .raise_irq = ks_pcie_am654_raise_irq, + .get_features = &ks_pcie_am654_get_features, +}; + +static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = ks_pcie->pci; + + ep = &pci->ep; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) { int num_lanes = ks_pcie->num_lanes; @@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) int num_lanes = ks_pcie->num_lanes; for (i = 0; i < num_lanes; i++) { + ret = phy_reset(ks_pcie->phy[i]); + if (ret < 0) + goto err_phy; + ret = phy_init(ks_pcie->phy[i]); if (ret < 0) goto err_phy; @@ -895,20 +1062,161 @@ err_phy: return ret; } +static int ks_pcie_set_mode(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct regmap *syscon; + u32 val; + u32 mask; + int ret = 0; + + syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); + if (IS_ERR(syscon)) + return 0; + + mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN; + val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN; + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) { + dev_err(dev, "failed to set pcie mode\n"); + return ret; + } + + return 0; +} + +static int ks_pcie_am654_set_mode(struct device *dev, + enum dw_pcie_device_mode mode) +{ + struct device_node *np = dev->of_node; + struct regmap *syscon; + u32 val; + u32 mask; + int ret = 0; + + syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); + if (IS_ERR(syscon)) + return 0; + + mask = AM654_PCIE_DEV_TYPE_MASK; + + switch (mode) { + case DW_PCIE_RC_TYPE: + val = RC; + break; + case DW_PCIE_EP_TYPE: + val = EP; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + return -EINVAL; + } + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) { + dev_err(dev, "failed to set pcie mode\n"); + return ret; + } + + return 0; +} + +static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed) +{ + u32 val; + + dw_pcie_dbi_ro_wr_en(pci); + + val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP); + if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= link_speed; + dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP, + val); + } + + val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2); + if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= link_speed; + dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2, + val); + } + + dw_pcie_dbi_ro_wr_dis(pci); +} + +static const struct ks_pcie_of_data ks_pcie_rc_of_data = { + .host_ops = &ks_pcie_host_ops, + .version = 0x365A, +}; + +static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = { + .host_ops = &ks_pcie_am654_host_ops, + .mode = DW_PCIE_RC_TYPE, + .version = 0x490A, +}; + +static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = { + .ep_ops = &ks_pcie_am654_ep_ops, + .mode = DW_PCIE_EP_TYPE, + .version = 0x490A, +}; + +static const struct of_device_id ks_pcie_of_match[] = { + { + .type = "pci", + 
.data = &ks_pcie_rc_of_data, + .compatible = "ti,keystone-pcie", + }, + { + .data = &ks_pcie_am654_rc_of_data, + .compatible = "ti,am654-pcie-rc", + }, + { + .data = &ks_pcie_am654_ep_of_data, + .compatible = "ti,am654-pcie-ep", + }, + { }, +}; + static int __init ks_pcie_probe(struct platform_device *pdev) { + const struct dw_pcie_host_ops *host_ops; + const struct dw_pcie_ep_ops *ep_ops; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + const struct ks_pcie_of_data *data; + const struct of_device_id *match; + enum dw_pcie_device_mode mode; struct dw_pcie *pci; struct keystone_pcie *ks_pcie; struct device_link **link; + struct gpio_desc *gpiod; + void __iomem *atu_base; + struct resource *res; + unsigned int version; + void __iomem *base; u32 num_viewport; struct phy **phy; + int link_speed; u32 num_lanes; char name[10]; int ret; + int irq; int i; + match = of_match_device(of_match_ptr(ks_pcie_of_match), dev); + data = (struct ks_pcie_of_data *)match->data; + if (!data) + return -EINVAL; + + version = data->version; + host_ops = data->host_ops; + ep_ops = data->ep_ops; + mode = data->mode; + ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); if (!ks_pcie) return -ENOMEM; @@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev) if (!pci) return -ENOMEM; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); + ks_pcie->va_app_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ks_pcie->va_app_base)) + return PTR_ERR(ks_pcie->va_app_base); + + ks_pcie->app = *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics"); + base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (of_device_is_compatible(np, "ti,am654-pcie-rc")) + ks_pcie->is_am6 = true; + + pci->dbi_base = base; + pci->dbi_base2 = base; pci->dev = dev; pci->ops = &ks_pcie_dw_pcie_ops; + pci->version = version; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "missing IRQ resource: %d\n", irq); + return irq; + } - ret = of_property_read_u32(np, "num-viewport", &num_viewport); + ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, + "ks-pcie-error-irq", ks_pcie); if (ret < 0) { - dev_err(dev, "unable to read *num-viewport* property\n"); + dev_err(dev, "failed to request error IRQ %d\n", + irq); return ret; } @@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev) ks_pcie->pci = pci; ks_pcie->link = link; ks_pcie->num_lanes = num_lanes; - ks_pcie->num_viewport = num_viewport; ks_pcie->phy = phy; + gpiod = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get reset GPIO\n"); + goto err_link; + } + ret = ks_pcie_enable_phy(ks_pcie); if (ret) { dev_err(dev, "failed to enable phy\n"); @@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev) goto err_get_sync; } - ret = ks_pcie_add_pcie_port(ks_pcie, pdev); - if (ret < 0) - goto err_get_sync; + if (pci->version >= 0x480A) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); + atu_base = devm_ioremap_resource(dev, res); + if (IS_ERR(atu_base)) { + ret = PTR_ERR(atu_base); + goto err_get_sync; + } + + pci->atu_base = atu_base; + + ret = ks_pcie_am654_set_mode(dev, mode); + if (ret < 0) + goto err_get_sync; + } else { + ret = ks_pcie_set_mode(dev); + if (ret < 0) + goto err_get_sync; + } + + link_speed = of_pci_get_max_link_speed(np); + if (link_speed < 0) + 
link_speed = 2; + + ks_pcie_set_link_speed(pci, link_speed); + + switch (mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) { + ret = -ENODEV; + goto err_get_sync; + } + + ret = of_property_read_u32(np, "num-viewport", &num_viewport); + if (ret < 0) { + dev_err(dev, "unable to read *num-viewport* property\n"); + goto err_get_sync; + } + + /* + * "Power Sequencing and Reset Signal Timings" table in + * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0 + * indicates PERST# should be deasserted after a minimum of 100 us + * once REFCLK is stable. The REFCLK to the connector in RC + * mode is selected while enabling the PHY. So deassert PERST# + * after 100 us. + */ + if (gpiod) { + usleep_range(100, 200); + gpiod_set_value_cansleep(gpiod, 1); + } + + ks_pcie->num_viewport = num_viewport; + pci->pp.ops = host_ops; + ret = ks_pcie_add_pcie_port(ks_pcie, pdev); + if (ret < 0) + goto err_get_sync; + break; + case DW_PCIE_EP_TYPE: + if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) { + ret = -ENODEV; + goto err_get_sync; + } + + pci->ep.ops = ep_ops; + ret = ks_pcie_add_pcie_ep(ks_pcie, pdev); + if (ret < 0) + goto err_get_sync; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + + ks_pcie_enable_error_irq(ks_pcie); return 0; diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index a42c9c3ae1cc..ca9aa4501e7e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, + .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), }; static const struct pci_epc_features* @@ -79,7 +80,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } } -static struct dw_pcie_ep_ops pcie_ep_ops = { +static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = ls_pcie_ep_init, .raise_irq = ls_pcie_ep_raise_irq, .get_features = ls_pcie_ep_get_features, diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index ce45bde29bf8..3a5fa26d5e56 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -201,6 +201,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp) return -EINVAL; } + of_node_put(msi_node); return 0; } diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c new file mode 100644 index 000000000000..1eeda2f6371f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips + * such as Graviton and Alpine) + * + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * + * Author: Jonathan Chocron <[email protected]> + */ + +#include <linux/pci.h> +#include <linux/pci-ecam.h> +#include <linux/pci-acpi.h> +#include "../../pci.h" + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +struct al_pcie_acpi { + void __iomem *dbi_base; +}; + +static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_config_window *cfg = bus->sysdata; + struct al_pcie_acpi *pcie = cfg->priv; + void __iomem *dbi_base = pcie->dbi_base; + + if (bus->number == cfg->busr.start) { + /* + * The DW PCIe core doesn't filter out transactions to other + * devices/functions on the root bus num, so we do this here. + */ + if (PCI_SLOT(devfn) > 0) + return NULL; + else + return dbi_base + where; + } + + return pci_ecam_map_bus(bus, devfn, where); +} + +static int al_pcie_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct al_pcie_acpi *al_pcie; + struct resource *res; + int ret; + + al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); + if (!al_pcie) + return -ENOMEM; + + res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res); + if (ret) { + dev_err(dev, "can't get rc dbi base address for SEG %d\n", + root->segment); + return ret; + } + + dev_dbg(dev, "Root port dbi res: %pR\n", res); + + al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(al_pcie->dbi_base)) { + long err = PTR_ERR(al_pcie->dbi_base); + + dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n", + res, err); + return err; + } + + cfg->priv = al_pcie; + + return 0; +} + +struct pci_ecam_ops al_pcie_ops = { + .bus_shift = 20, + .init = al_pcie_init, + .pci_ops = { + .map_bus = al_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ + +#ifdef CONFIG_PCIE_AL + +#include <linux/of_pci.h> +#include "pcie-designware.h" + +#define AL_PCIE_REV_ID_2 2 +#define AL_PCIE_REV_ID_3 3 +#define AL_PCIE_REV_ID_4 4 + +#define AXI_BASE_OFFSET 0x0 + +#define DEVICE_ID_OFFSET 0x16c + +#define DEVICE_REV_ID 0x0 +#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16) + +#define DEVICE_REV_ID_DEV_ID_X4 0 +#define DEVICE_REV_ID_DEV_ID_X8 2 +#define DEVICE_REV_ID_DEV_ID_X16 4 + +#define OB_CTRL_REV1_2_OFFSET 0x0040 +#define OB_CTRL_REV3_5_OFFSET 0x0030 + +#define CFG_TARGET_BUS 0x0 +#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0) +#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8) + +#define CFG_CONTROL 0x4 +#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8) +#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16) + +struct al_pcie_reg_offsets { + unsigned int ob_ctrl; +}; + +struct al_pcie_target_bus_cfg { + u8 reg_val; + u8 reg_mask; + u8 ecam_mask; +}; + +struct al_pcie { + struct dw_pcie *pci; + void __iomem *controller_base; /* base of PCIe unit (not DW core) */ + struct device *dev; + resource_size_t ecam_size; + unsigned int controller_rev_id; + struct al_pcie_reg_offsets reg_offsets; + struct al_pcie_target_bus_cfg target_bus_cfg; +}; + +#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12) + +#define to_al_pcie(x) dev_get_drvdata((x)->dev) + +static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset) +{ + return readl_relaxed(pcie->controller_base + offset); +} + +static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 
offset, + u32 val) +{ + writel_relaxed(val, pcie->controller_base + offset); +} + +static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id) +{ + u32 dev_rev_id_val; + u32 dev_id_val; + + dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET + + DEVICE_ID_OFFSET + + DEVICE_REV_ID); + dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val); + + switch (dev_id_val) { + case DEVICE_REV_ID_DEV_ID_X4: + *rev_id = AL_PCIE_REV_ID_2; + break; + case DEVICE_REV_ID_DEV_ID_X8: + *rev_id = AL_PCIE_REV_ID_3; + break; + case DEVICE_REV_ID_DEV_ID_X16: + *rev_id = AL_PCIE_REV_ID_4; + break; + default: + dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n", + dev_id_val); + return -EINVAL; + } + + dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val); + + return 0; +} + +static int al_pcie_reg_offsets_set(struct al_pcie *pcie) +{ + switch (pcie->controller_rev_id) { + case AL_PCIE_REV_ID_2: + pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET; + break; + case AL_PCIE_REV_ID_3: + case AL_PCIE_REV_ID_4: + pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET; + break; + default: + dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n", + pcie->controller_rev_id); + return -EINVAL; + } + + return 0; +} + +static inline void al_pcie_target_bus_set(struct al_pcie *pcie, + u8 target_bus, + u8 mask_target_bus) +{ + u32 reg; + + reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) | + FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus); + + al_pcie_controller_writel(pcie, AXI_BASE_OFFSET + + pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS, + reg); +} + +static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie, + unsigned int busnr, + unsigned int devfn) +{ + struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg; + unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask; + unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask; + struct pcie_port *pp = &pcie->pci->pp; + void __iomem *pci_base_addr; + + pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base + + (busnr_ecam << 20) + + PCIE_ECAM_DEVFN(devfn)); + + if (busnr_reg != target_bus_cfg->reg_val) { + dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n", + target_bus_cfg->reg_val, busnr_reg); + target_bus_cfg->reg_val = busnr_reg; + al_pcie_target_bus_set(pcie, + target_bus_cfg->reg_val, + target_bus_cfg->reg_mask); + } + + return pci_base_addr; +} + +static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + unsigned int busnr = bus->number; + void __iomem *pci_addr; + int rc; + + pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); + + rc = dw_pcie_read(pci_addr + where, size, val); + + dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where, + (pci_addr + where), *val); + + return rc; +} + +static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + unsigned int busnr = bus->number; + void __iomem *pci_addr; + int rc; + + pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); + + rc = dw_pcie_write(pci_addr + where, size, val); + + dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x 
(pci_addr: 0x%px) - val:0x%x\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where, + (pci_addr + where), val); + + return rc; +} + +static void al_pcie_config_prepare(struct al_pcie *pcie) +{ + struct al_pcie_target_bus_cfg *target_bus_cfg; + struct pcie_port *pp = &pcie->pci->pp; + unsigned int ecam_bus_mask; + u32 cfg_control_offset; + u8 subordinate_bus; + u8 secondary_bus; + u32 cfg_control; + u32 reg; + + target_bus_cfg = &pcie->target_bus_cfg; + + ecam_bus_mask = (pcie->ecam_size >> 20) - 1; + if (ecam_bus_mask > 255) { + dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n"); + ecam_bus_mask = 255; + } + + /* This portion is taken from the transaction address */ + target_bus_cfg->ecam_mask = ecam_bus_mask; + /* This portion is taken from the cfg_target_bus reg */ + target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask; + target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask; + + al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val, + target_bus_cfg->reg_mask); + + secondary_bus = pp->busn->start + 1; + subordinate_bus = pp->busn->end; + + /* Set the valid values of secondary and subordinate buses */ + cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl + + CFG_CONTROL; + + cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset); + + reg = cfg_control & + ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK); + + reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) | + FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus); + + al_pcie_controller_writel(pcie, cfg_control_offset, reg); +} + +static int al_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct al_pcie *pcie = to_al_pcie(pci); + int rc; + + rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id); + if (rc) + return rc; + + rc = al_pcie_reg_offsets_set(pcie); + if (rc) + return rc; + + al_pcie_config_prepare(pcie); + + return 0; +} + +static const struct dw_pcie_host_ops al_pcie_host_ops = { + .rd_other_conf = al_pcie_rd_other_conf, + .wr_other_conf = al_pcie_wr_other_conf, + .host_init = al_pcie_host_init, +}; + +static int al_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + pp->ops = &al_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { +}; + +static int al_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *controller_res; + struct resource *ecam_res; + struct resource *dbi_res; + struct al_pcie *al_pcie; + struct dw_pcie *pci; + + al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); + if (!al_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + al_pcie->pci = pci; + al_pcie->dev = dev; + + dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res); + return PTR_ERR(pci->dbi_base); + } + + ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (!ecam_res) { + dev_err(dev, "couldn't find 'config' reg in DT\n"); + return -ENOENT; + } + al_pcie->ecam_size = resource_size(ecam_res); + + controller_res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, + "controller"); + al_pcie->controller_base = devm_ioremap_resource(dev, controller_res); + if (IS_ERR(al_pcie->controller_base)) { + dev_err(dev, "couldn't remap controller base %pR\n", + controller_res); + return PTR_ERR(al_pcie->controller_base); + } + + dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n", + dbi_res, controller_res); + + platform_set_drvdata(pdev, al_pcie); + + return al_add_pcie_port(&pci->pp, pdev); +} + +static const struct of_device_id al_pcie_of_match[] = { + { .compatible = "amazon,al-alpine-v2-pcie", + }, + { .compatible = "amazon,al-alpine-v3-pcie", + }, + {}, +}; + +static struct platform_driver al_pcie_driver = { + .driver = { + .name = "al-pcie", + .of_match_table = al_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = al_pcie_probe, +}; +builtin_platform_driver(al_pcie_driver); + +#endif /* CONFIG_PCIE_AL */ diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 0c389a30ef5d..49596547e8c2 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -25,10 +25,14 @@ #include "pcie-designware.h" +#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4 + struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; + struct phy *phy[ARMADA8K_PCIE_MAX_LANES]; + unsigned int phy_count; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -55,7 +59,7 @@ struct armada8k_pcie { #define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) #define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) /* - * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write + * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write * allocate */ #define ARCACHE_DEFAULT_VALUE 0x3511 @@ -67,6 +71,75 @@ struct armada8k_pcie { #define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) +static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie) +{ + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + phy_power_off(pcie->phy[i]); + phy_exit(pcie->phy[i]); + } +} + +static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie) +{ + int ret; + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + ret = phy_init(pcie->phy[i]); + if (ret) + return ret; + + ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE, + pcie->phy_count); + if (ret) { + phy_exit(pcie->phy[i]); + return ret; + } + + ret = phy_power_on(pcie->phy[i]); + if (ret) { + phy_exit(pcie->phy[i]); + return ret; + } + } + + return 0; +} + +static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + struct device_node *node = dev->of_node; + int ret = 0; + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i); + if (IS_ERR(pcie->phy[i])) { + if (PTR_ERR(pcie->phy[i]) != -ENODEV) + return PTR_ERR(pcie->phy[i]); + + pcie->phy[i] = NULL; + continue; + } + + pcie->phy_count++; + } + + /* Old bindings lack the PHY handle, so just warn if there is no PHY */ + if (!pcie->phy_count) + dev_warn(dev, "No available PHY\n"); + + ret = armada8k_pcie_enable_phys(pcie); + if (ret) + dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret); + + return ret; +} + static int armada8k_pcie_link_up(struct dw_pcie *pci) { u32 reg; @@ -249,14 +322,20 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail_clkreg; } + ret = armada8k_pcie_setup_phys(pcie); + if (ret) + goto fail_clkreg; +
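+ /* The PHYs are initialized and powered on from here; any later probe failure must unwind through the disable_phy error path below. */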
platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); if (ret) - goto fail_clkreg; + goto disable_phy; return 0; +disable_phy: + armada8k_pcie_disable_phys(pcie); fail_clkreg: clk_disable_unprepare(pcie->clk_reg); fail: diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index dba83abfe764..d00252bd8fae 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } -static struct dw_pcie_ep_ops pcie_ep_ops = { +static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, }; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 24f5a775ad34..3dd2e2697294 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -40,39 +40,6 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) __dw_pcie_ep_reset_bar(pci, bar, 0); } -static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, - u8 cap) -{ - u8 cap_id, next_cap_ptr; - u16 reg; - - reg = dw_pcie_readw_dbi(pci, cap_ptr); - next_cap_ptr = (reg & 0xff00) >> 8; - cap_id = (reg & 0x00ff); - - if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) - return 0; - - if (cap_id == cap) - return cap_ptr; - - return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); -} - -static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) -{ - u8 next_cap_ptr; - u16 reg; - - reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); - next_cap_ptr = (reg & 0x00ff); - - if (!next_cap_ptr) - return 0; - - return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); -} - static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr) { @@ -397,6 +364,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; + unsigned int aligned_offset; u16 msg_ctrl, msg_data; u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; @@ -422,13 +390,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + msg_addr = ((u64)msg_addr_upper) << 32 | + (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); @@ -504,10 +474,33 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep) pci_epc_mem_exit(epc); } +static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) +{ + u32 header; + int pos = PCI_CFG_SPACE_SIZE; + + while (pos) { + header = dw_pcie_readl_dbi(pci, pos); + if (PCI_EXT_CAP_ID(header) == cap) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (!pos) + break; + } + + return 0; +} + int dw_pcie_ep_init(struct dw_pcie_ep *ep) { + int i; int ret; + u32 reg; void *addr; + u8 hdr_type; + unsigned int nbars; + unsigned int offset; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; @@ -517,10 +510,6 @@ int 
dw_pcie_ep_init(struct dw_pcie_ep *ep) dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); return -EINVAL; } - if (pci->iatu_unroll_enabled && !pci->atu_base) { - dev_err(dev, "atu_base is not populated\n"); - return -EINVAL; - } ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); if (ret < 0) { @@ -574,6 +563,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ep->ops->ep_init) ep->ops->ep_init(ep); + hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); + if (hdr_type != PCI_HEADER_TYPE_NORMAL) { + dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", + hdr_type); + return -EIO; + } + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; @@ -591,9 +587,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); return -ENOMEM; } - ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); + ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); + + ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); - ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); + offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + if (offset) { + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> + PCI_REBAR_CTRL_NBAR_SHIFT; + + dw_pcie_dbi_ro_wr_en(pci); + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_dbi_ro_wr_dis(pci); + } dw_pcie_setup(pci); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 25087d3c9a82..0f36a926059a 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target; - if (pp->ops->get_msi_addr) - msi_target = pp->ops->get_msi_addr(pp); - else - msi_target = (u64)pp->msi_data; + msi_target = (u64)pp->msi_data; msg->address_lo = lower_32_bits(msi_target); msg->address_hi = upper_32_bits(msi_target); - if (pp->ops->get_msi_data) - msg->data = pp->ops->get_msi_data(pp, d->hwirq); - else - msg->data = d->hwirq; + msg->data = d->hwirq; dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)d->hwirq, msg->address_hi, msg->address_lo); @@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d) raw_spin_lock_irqsave(&pp->lock, flags); - if (pp->ops->msi_clear_irq) { - pp->ops->msi_clear_irq(pp, d->hwirq); - } else { - ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; - res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; - pp->irq_mask[ctrl] |= BIT(bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, - pp->irq_mask[ctrl]); - } + pp->irq_mask[ctrl] |= BIT(bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + pp->irq_mask[ctrl]); raw_spin_unlock_irqrestore(&pp->lock, flags); } @@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d) raw_spin_lock_irqsave(&pp->lock, flags); - if (pp->ops->msi_set_irq) { - pp->ops->msi_set_irq(pp, d->hwirq); - } else { - ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; - res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 
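+ /* Each controller block of MAX_MSI_IRQS_PER_CTRL vectors has its own mask register at a MSI_REG_CTRL_BLOCK_SIZE stride; derive the block offset and bit for this hwirq. */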
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; - pp->irq_mask[ctrl] &= ~BIT(bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, - pp->irq_mask[ctrl]); - } + pp->irq_mask[ctrl] &= ~BIT(bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + pp->irq_mask[ctrl]); raw_spin_unlock_irqrestore(&pp->lock, flags); } @@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d) { struct pcie_port *pp = irq_data_get_irq_chip_data(d); unsigned int res, bit, ctrl; - unsigned long flags; ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; - raw_spin_lock_irqsave(&pp->lock, flags); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); - - if (pp->ops->msi_irq_ack) - pp->ops->msi_irq_ack(d->hwirq, pp); - - raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip dw_pci_msi_bottom_irq_chip = { @@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, bit + i, - &dw_pci_msi_bottom_irq_chip, + pp->msi_irq_chip, pp, handle_edge_irq, NULL, NULL); @@ -298,25 +276,31 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) void dw_pcie_free_msi(struct pcie_port *pp) { - irq_set_chained_handler(pp->msi_irq, NULL); - irq_set_handler_data(pp->msi_irq, NULL); + if (pp->msi_irq) { + irq_set_chained_handler(pp->msi_irq, NULL); + irq_set_handler_data(pp->msi_irq, NULL); + } irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); + + if (pp->msi_page) + __free_page(pp->msi_page); } void dw_pcie_msi_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct page *page; u64 msi_target; - page = alloc_page(GFP_KERNEL); - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + pp->msi_page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (dma_mapping_error(dev, pp->msi_data)) { dev_err(dev, "Failed to map MSI data\n"); - __free_page(page); + __free_page(pp->msi_page); + pp->msi_page = NULL; return; } msi_target = (u64)pp->msi_data; @@ -327,6 +311,7 @@ void dw_pcie_msi_init(struct pcie_port *pp) dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, upper_32_bits(msi_target)); } +EXPORT_SYMBOL_GPL(dw_pcie_msi_init); int dw_pcie_host_init(struct pcie_port *pp) { @@ -335,9 +320,10 @@ int dw_pcie_host_init(struct pcie_port *pp) struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct resource_entry *win, *tmp; - struct pci_bus *bus, *child; + struct pci_bus *child; struct pci_host_bridge *bridge; struct resource *cfg_res; + u32 hdr_type; int ret; raw_spin_lock_init(&pci->pp.lock); @@ -352,7 +338,7 @@ int dw_pcie_host_init(struct pcie_port *pp) dev_err(dev, "Missing *config* reg space\n"); } - bridge = pci_alloc_host_bridge(0); + bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; @@ -363,7 +349,7 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = devm_request_pci_bus_resources(dev, &bridge->windows); if (ret) - goto error; + return ret; /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { @@ -407,8 +393,7 @@ int dw_pcie_host_init(struct pcie_port *pp) resource_size(pp->cfg)); if (!pci->dbi_base) { dev_err(dev, "Error with ioremap\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -419,8 +404,7 @@ int 
dw_pcie_host_init(struct pcie_port *pp) pp->cfg0_base, pp->cfg0_size); if (!pp->va_cfg0_base) { dev_err(dev, "Error with ioremap in function\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -430,8 +414,7 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg1_size); if (!pp->va_cfg1_base) { dev_err(dev, "Error with ioremap\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -439,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pci->num_viewport = 2; - if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { + if (pci_msi_enabled()) { /* * If a specific SoC driver needs to change the * default number of vectors, it needs to implement @@ -454,14 +437,16 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->num_vectors == 0) { dev_err(dev, "Invalid number of vectors\n"); - goto error; + return -EINVAL; } } if (!pp->ops->msi_host_init) { + pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; + ret = dw_pcie_allocate_domains(pp); if (ret) - goto error; + return ret; if (pp->msi_irq) irq_set_chained_handler_and_data(pp->msi_irq, @@ -470,14 +455,29 @@ int dw_pcie_host_init(struct pcie_port *pp) } else { ret = pp->ops->msi_host_init(pp); if (ret < 0) - goto error; + return ret; } } if (pp->ops->host_init) { ret = pp->ops->host_init(pp); if (ret) - goto error; + goto err_free_msi; + } + + ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type); + if (ret != PCIBIOS_SUCCESSFUL) { + dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n", + ret); + ret = pcibios_err_to_errno(ret); + goto err_free_msi; + } + if (hdr_type != PCI_HEADER_TYPE_BRIDGE) { + dev_err(pci->dev, + "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n", + hdr_type); + ret = -EIO; + goto err_free_msi; } pp->root_bus_nr = pp->busn->start; @@ -491,26 +491,37 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = pci_scan_root_bus_bridge(bridge); if (ret) - goto error; + goto err_free_msi; - bus = bridge->bus; + pp->root_bus = bridge->bus; if (pp->ops->scan_bus) pp->ops->scan_bus(pp); - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); + pci_bus_size_bridges(pp->root_bus); + pci_bus_assign_resources(pp->root_bus); - list_for_each_entry(child, &bus->children, node) + list_for_each_entry(child, &pp->root_bus->children, node) pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); + pci_bus_add_devices(pp->root_bus); return 0; -error: - pci_free_host_bridge(bridge); +err_free_msi: + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); return ret; } +EXPORT_SYMBOL_GPL(dw_pcie_host_init); + +void dw_pcie_host_deinit(struct pcie_port *pp) +{ + pci_stop_root_bus(pp->root_bus); + pci_remove_root_bus(pp->root_bus); + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); +} +EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val, @@ -628,35 +639,32 @@ static struct pci_ops dw_pcie_ops = { .write = dw_pcie_wr_conf, }; -static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); - if (val == 0xffffffff) - return 1; - - return 0; -} - void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val, ctrl, num_ctrls; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - dw_pcie_setup(pci); + /* + * Enable DBI read-only registers for writing/updating configuration. + * Write permission gets disabled towards the end of this function. 
+ */ + dw_pcie_dbi_ro_wr_en(pci); - num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + dw_pcie_setup(pci); - /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) { - pp->irq_mask[ctrl] = ~0; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, pp->irq_mask[ctrl]); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, ~0); + if (!pp->ops->msi_host_init) { + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + pp->irq_mask[ctrl] = ~0; + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, pp->irq_mask[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + } } /* Setup RC BARs */ @@ -664,12 +672,10 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); /* Setup interrupt pins */ - dw_pcie_dbi_ro_wr_en(pci); val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); val &= 0xffff00ff; val |= 0x00000100; dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); - dw_pcie_dbi_ro_wr_dis(pci); /* Setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); @@ -690,14 +696,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * we should not program the ATU here. */ if (!pp->ops->rd_other_conf) { - /* Get iATU unroll support */ - pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); - dev_dbg(pci->dev, "iATU unroll: %s\n", - pci->iatu_unroll_enabled ? "enabled" : "disabled"); - - if (pci->iatu_unroll_enabled && !pci->atu_base) - pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); @@ -709,14 +707,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - /* Enable write permission for the DBI read-only register */ - dw_pcie_dbi_ro_wr_en(pci); /* Program correct class for RC */ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); - /* Better disable write permission right after the update */ - dw_pcie_dbi_ro_wr_dis(pci); dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); + + dw_pcie_dbi_ro_wr_dis(pci); } +EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 932dbd0b34b6..b58fdcbc664b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep) return &dw_plat_pcie_epc_features; } -static struct dw_pcie_ep_ops pcie_ep_ops = { +static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dw_plat_pcie_ep_init, .raise_irq = dw_plat_pcie_ep_raise_irq, .get_features = dw_plat_pcie_get_features, diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 31f6331ca46f..820488dfeaed 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -14,11 +14,85 @@ #include "pcie-designware.h" -/* PCIe Port Logic registers */ -#define PLR_OFFSET 0x700 -#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) -#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 
<< 29) +/* + * These interfaces resemble the pci_find_*capability() interfaces, but these + * are for configuring host controllers, which are bridges *to* PCI devices but + * are not PCI devices themselves. + */ +static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, + u8 cap) +{ + u8 cap_id, next_cap_ptr; + u16 reg; + + if (!cap_ptr) + return 0; + + reg = dw_pcie_readw_dbi(pci, cap_ptr); + cap_id = (reg & 0x00ff); + + if (cap_id > PCI_CAP_ID_MAX) + return 0; + + if (cap_id == cap) + return cap_ptr; + + next_cap_ptr = (reg & 0xff00) >> 8; + return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); +} + +u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap) +{ + u8 next_cap_ptr; + u16 reg; + + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); + next_cap_ptr = (reg & 0x00ff); + + return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_capability); + +static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start, + u8 cap) +{ + u32 header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (start) + pos = start; + + header = dw_pcie_readl_dbi(pci, pos); + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap && pos != start) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + header = dw_pcie_readl_dbi(pci, pos); + } + + return 0; +} + +u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) +{ + return dw_pcie_find_next_ext_capability(pci, 0, cap); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); int dw_pcie_read(void __iomem *addr, int size, u32 *val) { @@ -40,6 +114,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val) return PCIBIOS_SUCCESSFUL; } +EXPORT_SYMBOL_GPL(dw_pcie_read); int dw_pcie_write(void __iomem *addr, int size, u32 val) { @@ -57,37 +132,96 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val) return PCIBIOS_SUCCESSFUL; } +EXPORT_SYMBOL_GPL(dw_pcie_write); -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size) +u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size) { int ret; u32 val; if (pci->ops->read_dbi) - return pci->ops->read_dbi(pci, base, reg, size); + return pci->ops->read_dbi(pci, pci->dbi_base, reg, size); - ret = dw_pcie_read(base + reg, size, &val); + ret = dw_pcie_read(pci->dbi_base + reg, size, &val); if (ret) dev_err(pci->dev, "Read DBI address failed\n"); return val; } +EXPORT_SYMBOL_GPL(dw_pcie_read_dbi); -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val) +void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val) { int ret; if (pci->ops->write_dbi) { - pci->ops->write_dbi(pci, base, reg, size, val); + pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val); return; } - ret = dw_pcie_write(base + reg, size, val); + ret = dw_pcie_write(pci->dbi_base + reg, size, val); if (ret) dev_err(pci->dev, "Write DBI address failed\n"); } +EXPORT_SYMBOL_GPL(dw_pcie_write_dbi); + +u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size) +{ + int ret; + u32 val; + + if (pci->ops->read_dbi2) + return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size); + + ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val); + if (ret) + dev_err(pci->dev, "read DBI address failed\n"); + + return val; +} 
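+ +/* The DBI2 accessors mirror their DBI counterparts but target the shadow register space (dbi_base2), which endpoint mode uses, e.g., to program BAR mask registers. */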
+ +void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) +{ + int ret; + + if (pci->ops->write_dbi2) { + pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val); + return; + } + + ret = dw_pcie_write(pci->dbi_base2 + reg, size, val); + if (ret) + dev_err(pci->dev, "write DBI address failed\n"); +} + +u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size) +{ + int ret; + u32 val; + + if (pci->ops->read_dbi) + return pci->ops->read_dbi(pci, pci->atu_base, reg, size); + + ret = dw_pcie_read(pci->atu_base + reg, size, &val); + if (ret) + dev_err(pci->dev, "Read ATU address failed\n"); + + return val; +} + +void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val) +{ + int ret; + + if (pci->ops->write_dbi) { + pci->ops->write_dbi(pci, pci->atu_base, reg, size, val); + return; + } + + ret = dw_pcie_write(pci->atu_base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write ATU address failed\n"); +} static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) { @@ -322,10 +456,11 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } - dev_err(pci->dev, "Phy link never came up\n"); + dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } +EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); int dw_pcie_link_up(struct dw_pcie *pci) { @@ -334,9 +469,20 @@ int dw_pcie_link_up(struct dw_pcie *pci) if (pci->ops->link_up) return pci->ops->link_up(pci); - val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); - return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && - (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); + val = readl(pci->dbi_base + PCIE_PORT_DEBUG1); + return ((val & PCIE_PORT_DEBUG1_LINK_UP) && + (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); +} + +static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); + if (val == 0xffffffff) + return 1; + + return 0; } void dw_pcie_setup(struct dw_pcie *pci) @@ -347,9 +493,21 @@ void dw_pcie_setup(struct dw_pcie *pci) struct device *dev = pci->dev; struct device_node *np = dev->of_node; + if (pci->version >= 0x480A || (!pci->version && + dw_pcie_iatu_unroll_enabled(pci))) { + pci->iatu_unroll_enabled = true; + if (!pci->atu_base) + pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; + } + dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? 
+ "enabled" : "disabled"); + + ret = of_property_read_u32(np, "num-lanes", &lanes); - if (ret) - lanes = 0; + if (ret) { + dev_dbg(pci->dev, "property num-lanes isn't found\n"); + return; + } /* Set the number of lanes */ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); @@ -391,4 +549,11 @@ void dw_pcie_setup(struct dw_pcie *pci) break; } dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); + + if (of_property_read_bool(np, "snps,enable-cdm-check")) { + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | + PCIE_PL_CHK_REG_CHK_REG_START; + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + } } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 377f4c0b52da..5a18e94e52c8 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -41,6 +41,9 @@ #define PCIE_PORT_DEBUG0 0x728 #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f #define PORT_LOGIC_LTSSM_STATE_L0 0x11 +#define PCIE_PORT_DEBUG1 0x72C +#define PCIE_PORT_DEBUG1_LINK_UP BIT(4) +#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE BIT(17) @@ -83,6 +86,15 @@ #define PCIE_MISC_CONTROL_1_OFF 0x8BC #define PCIE_DBI_RO_WR_EN BIT(0) +#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20 +#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0) +#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1) +#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16) +#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17) +#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18) + +#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28 + /* * iATU Unroll-specific register definitions * From 4.80 core version the address translation will be made by unroll @@ -145,14 +157,9 @@ struct dw_pcie_host_ops { int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); int (*host_init)(struct pcie_port *pp); - void (*msi_set_irq)(struct pcie_port *pp, int irq); - void (*msi_clear_irq)(struct pcie_port *pp, int irq); - phys_addr_t (*get_msi_addr)(struct pcie_port *pp); - u32 (*get_msi_data)(struct pcie_port *pp, int pos); void (*scan_bus)(struct pcie_port *pp); void (*set_num_vectors)(struct pcie_port *pp); int (*msi_host_init)(struct pcie_port *pp); - void (*msi_irq_ack)(int irq, struct pcie_port *pp); }; struct pcie_port { @@ -179,8 +186,11 @@ struct pcie_port { struct irq_domain *irq_domain; struct irq_domain *msi_domain; dma_addr_t msi_data; + struct page *msi_page; + struct irq_chip *msi_irq_chip; u32 num_vectors; u32 irq_mask[MAX_MSI_CTRLS]; + struct pci_bus *root_bus; raw_spinlock_t lock; DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); }; @@ -200,7 +210,7 @@ struct dw_pcie_ep_ops { struct dw_pcie_ep { struct pci_epc *epc; - struct dw_pcie_ep_ops *ops; + const struct dw_pcie_ep_ops *ops; phys_addr_t phys_base; size_t addr_size; size_t page_size; @@ -222,6 +232,10 @@ struct dw_pcie_ops { size_t size); void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, size_t size, u32 val); + u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size); + void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size, u32 val); int (*link_up)(struct dw_pcie *pcie); int (*start_link)(struct dw_pcie *pcie); void (*stop_link)(struct dw_pcie *pcie); @@ -238,6 +252,7 @@ struct dw_pcie { struct pcie_port pp; struct dw_pcie_ep ep; const struct dw_pcie_ops *ops; + 
unsigned int version; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -245,13 +260,18 @@ struct dw_pcie { #define to_dw_pcie_from_ep(endpoint) \ container_of((endpoint), struct dw_pcie, ep) +u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); +u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); + int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size); -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val); +u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val); +u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val); +u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val); int dw_pcie_link_up(struct dw_pcie *pci); int dw_pcie_wait_for_link(struct dw_pcie *pci); void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, @@ -265,52 +285,52 @@ void dw_pcie_setup(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); + dw_pcie_write_dbi(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); + return dw_pcie_read_dbi(pci, reg, 0x4); } static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); + dw_pcie_write_dbi(pci, reg, 0x2, val); } static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); + return dw_pcie_read_dbi(pci, reg, 0x2); } static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); + dw_pcie_write_dbi(pci, reg, 0x1, val); } static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); + return dw_pcie_read_dbi(pci, reg, 0x1); } static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); + dw_pcie_write_dbi2(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); + return dw_pcie_read_dbi2(pci, reg, 0x4); } static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val); + dw_pcie_write_atu(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4); + return dw_pcie_read_atu(pci, reg, 0x4); } static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) @@ -341,6 +361,7 @@ void dw_pcie_msi_init(struct pcie_port *pp); void dw_pcie_free_msi(struct pcie_port *pp); void dw_pcie_setup_rc(struct pcie_port *pp); int dw_pcie_host_init(struct pcie_port *pp); +void dw_pcie_host_deinit(struct pcie_port *pp); int dw_pcie_allocate_domains(struct pcie_port *pp); #else static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) @@ -365,6 +386,10 @@ static inline int 
dw_pcie_host_init(struct pcie_port *pp) return 0; } +static inline void dw_pcie_host_deinit(struct pcie_port *pp) +{ +} + static inline int dw_pcie_allocate_domains(struct pcie_port *pp) { return 0; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 954bc2b74bbc..811b5c6d62ea 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev) hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); if (IS_ERR(hipcie->vpcie)) { - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(hipcie->vpcie) != -ENODEV) + return PTR_ERR(hipcie->vpcie); hipcie->vpcie = NULL; } diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 9b599296205d..c19617a912bd 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -2,7 +2,7 @@ /* * PCIe host controller driver for Kirin Phone SoCs * - * Copyright (C) 2017 Hilisicon Electronics Co., Ltd. + * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. * http://www.huawei.com * * Author: Xiaowei Song <[email protected]> @@ -436,7 +436,7 @@ static int kirin_pcie_host_init(struct pcie_port *pp) return 0; } -static struct dw_pcie_ops kirin_dw_pcie_ops = { +static const struct dw_pcie_ops kirin_dw_pcie_ops = { .read_dbi = kirin_pcie_read_dbi, .write_dbi = kirin_pcie_write_dbi, .link_up = kirin_pcie_link_up, diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index a7f703556790..7e581748ee9f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -112,10 +112,10 @@ struct qcom_pcie_resources_2_3_2 { struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; }; +#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 struct qcom_pcie_resources_2_4_0 { - struct clk *aux_clk; - struct clk *master_clk; - struct clk *slave_clk; + struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; + int num_clks; struct reset_control *axi_m_reset; struct reset_control *axi_s_reset; struct reset_control *pipe_reset; @@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie) static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { + /* Ensure that PERST has been asserted for at least 100 ms */ + msleep(100); gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } @@ -638,18 +640,20 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); + int ret; - res->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux_clk)) - return PTR_ERR(res->aux_clk); + res->clks[0].id = "aux"; + res->clks[1].id = "master_bus"; + res->clks[2].id = "slave_bus"; + res->clks[3].id = "iface"; - res->master_clk = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_clk)) - return PTR_ERR(res->master_clk); + /* qcom,pcie-ipq4019 is defined without "iface" */ + res->num_clks = is_ipq ? 
3 : 4; - res->slave_clk = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_clk)) - return PTR_ERR(res->slave_clk); + ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); + if (ret < 0) + return ret; res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); if (IS_ERR(res->axi_m_reset)) @@ -659,27 +663,33 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) if (IS_ERR(res->axi_s_reset)) return PTR_ERR(res->axi_s_reset); - res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(res->pipe_reset)) - return PTR_ERR(res->pipe_reset); - - res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, - "axi_m_vmid"); - if (IS_ERR(res->axi_m_vmid_reset)) - return PTR_ERR(res->axi_m_vmid_reset); - - res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, - "axi_s_xpu"); - if (IS_ERR(res->axi_s_xpu_reset)) - return PTR_ERR(res->axi_s_xpu_reset); - - res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); - if (IS_ERR(res->parf_reset)) - return PTR_ERR(res->parf_reset); - - res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); - if (IS_ERR(res->phy_reset)) - return PTR_ERR(res->phy_reset); + if (is_ipq) { + /* + * These resources relate to the PHY or are secure clocks, but + * are controlled here for IPQ4019. + */ + res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, + "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, + "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + } res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, "axi_m_sticky"); @@ -699,9 +709,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) if (IS_ERR(res->ahb_reset)) return PTR_ERR(res->ahb_reset); - res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); - if (IS_ERR(res->phy_ahb_reset)) - return PTR_ERR(res->phy_ahb_reset); + if (is_ipq) { + res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + } return 0; } @@ -719,9 +731,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) reset_control_assert(res->axi_m_sticky_reset); reset_control_assert(res->pwr_reset); reset_control_assert(res->ahb_reset); - clk_disable_unprepare(res->aux_clk); - clk_disable_unprepare(res->master_clk); - clk_disable_unprepare(res->slave_clk); + clk_bulk_disable_unprepare(res->num_clks, res->clks); } static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) @@ -850,23 +860,9 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) usleep_range(10000, 12000); - ret = clk_prepare_enable(res->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_clk_aux; - } - - ret = clk_prepare_enable(res->master_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_axi_m; - } - - ret = clk_prepare_enable(res->slave_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_axi_s; - } + ret =
clk_bulk_prepare_enable(res->num_clks, res->clks); + if (ret) + goto err_clks; /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); @@ -891,11 +887,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) return 0; -err_clk_axi_s: - clk_disable_unprepare(res->master_clk); -err_clk_axi_m: - clk_disable_unprepare(res->aux_clk); -err_clk_aux: +err_clks: reset_control_assert(res->ahb_reset); err_rst_ahb: reset_control_assert(res->pwr_reset); @@ -1129,25 +1121,8 @@ err_deinit: return ret; } -static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - /* the device class is not reported correctly from the register */ - if (where == PCI_CLASS_REVISION && size == 4) { - *val = readl(pci->dbi_base + PCI_CLASS_REVISION); - *val &= 0xff; /* keep revision id */ - *val |= PCI_CLASS_BRIDGE_PCI << 16; - return PCIBIOS_SUCCESSFUL; - } - - return dw_pcie_read(pci->dbi_base + where, size, val); -} - static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .host_init = qcom_pcie_host_init, - .rd_own_conf = qcom_pcie_rd_own_conf, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ @@ -1306,9 +1281,16 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, + { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 }, { } }; +static void qcom_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); + static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, .driver = { diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c new file mode 100644 index 000000000000..f89f5acee72d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -0,0 +1,1732 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Tegra194 SoC + * + * Copyright (C) 2019 NVIDIA Corporation. 
+ * + * Author: Vidya Sagar <[email protected]> + */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/random.h> +#include <linux/reset.h> +#include <linux/resource.h> +#include <linux/types.h> +#include "pcie-designware.h" +#include <soc/tegra/bpmp.h> +#include <soc/tegra/bpmp-abi.h> +#include "../../pci.h" + +#define APPL_PINMUX 0x0 +#define APPL_PINMUX_PEX_RST BIT(0) +#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) +#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3) +#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4) +#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5) +#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9) +#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10) + +#define APPL_CTRL 0x4 +#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6) +#define APPL_CTRL_LTSSM_EN BIT(7) +#define APPL_CTRL_HW_HOT_RST_EN BIT(20) +#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) +#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 +#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 + +#define APPL_INTR_EN_L0_0 0x8 +#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) +#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) +#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) +#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) +#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) +#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) + +#define APPL_INTR_STATUS_L0 0xC +#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) +#define APPL_INTR_STATUS_L0_INT_INT BIT(8) +#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) + +#define APPL_INTR_EN_L1_0_0 0x1C +#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) + +#define APPL_INTR_STATUS_L1_0_0 0x20 +#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) + +#define APPL_INTR_STATUS_L1_1 0x2C +#define APPL_INTR_STATUS_L1_2 0x30 +#define APPL_INTR_STATUS_L1_3 0x34 +#define APPL_INTR_STATUS_L1_6 0x3C +#define APPL_INTR_STATUS_L1_7 0x40 + +#define APPL_INTR_EN_L1_8_0 0x44 +#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) +#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3) +#define APPL_INTR_EN_L1_8_INTX_EN BIT(11) +#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15) + +#define APPL_INTR_STATUS_L1_8_0 0x4C +#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6) +#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2) +#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3) + +#define APPL_INTR_STATUS_L1_9 0x54 +#define APPL_INTR_STATUS_L1_10 0x58 +#define APPL_INTR_STATUS_L1_11 0x64 +#define APPL_INTR_STATUS_L1_13 0x74 +#define APPL_INTR_STATUS_L1_14 0x78 +#define APPL_INTR_STATUS_L1_15 0x7C +#define APPL_INTR_STATUS_L1_17 0x88 + +#define APPL_INTR_EN_L1_18 0x90 +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2) +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) +#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) + +#define APPL_INTR_STATUS_L1_18 0x94 +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2) +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) +#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) + +#define APPL_MSI_CTRL_2 0xB0 + +#define APPL_LTR_MSG_1 0xC4 +#define LTR_MSG_REQ BIT(15) +#define 
LTR_MST_NO_SNOOP_SHIFT 16 + +#define APPL_LTR_MSG_2 0xC8 +#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3) + +#define APPL_LINK_STATUS 0xCC +#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0) + +#define APPL_DEBUG 0xD0 +#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21) +#define APPL_DEBUG_PM_LINKST_IN_L0 0x11 +#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3) +#define APPL_DEBUG_LTSSM_STATE_SHIFT 3 +#define LTSSM_STATE_PRE_DETECT 5 + +#define APPL_RADM_STATUS 0xE4 +#define APPL_PM_XMT_TURNOFF_STATE BIT(0) + +#define APPL_DM_TYPE 0x100 +#define APPL_DM_TYPE_MASK GENMASK(3, 0) +#define APPL_DM_TYPE_RP 0x4 +#define APPL_DM_TYPE_EP 0x0 + +#define APPL_CFG_BASE_ADDR 0x104 +#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12) + +#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108 +#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18) + +#define APPL_CFG_MISC 0x110 +#define APPL_CFG_MISC_SLV_EP_MODE BIT(14) +#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10) +#define APPL_CFG_MISC_ARCACHE_SHIFT 10 +#define APPL_CFG_MISC_ARCACHE_VAL 3 + +#define APPL_CFG_SLCG_OVERRIDE 0x114 +#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0) + +#define APPL_CAR_RESET_OVRD 0x12C +#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0) + +#define IO_BASE_IO_DECODE BIT(0) +#define IO_BASE_IO_DECODE_BIT8 BIT(8) + +#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0) +#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16) + +#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 +#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) + +#define EVENT_COUNTER_ALL_CLEAR 0x3 +#define EVENT_COUNTER_ENABLE_ALL 0x7 +#define EVENT_COUNTER_ENABLE_SHIFT 2 +#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0) +#define EVENT_COUNTER_EVENT_SEL_SHIFT 16 +#define EVENT_COUNTER_EVENT_Tx_L0S 0x2 +#define EVENT_COUNTER_EVENT_Rx_L0S 0x3 +#define EVENT_COUNTER_EVENT_L1 0x5 +#define EVENT_COUNTER_EVENT_L1_1 0x7 +#define EVENT_COUNTER_EVENT_L1_2 0x8 +#define EVENT_COUNTER_GROUP_SEL_SHIFT 24 +#define EVENT_COUNTER_GROUP_5 0x5 + +#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C +#define ENTER_ASPM BIT(30) +#define L0S_ENTRANCE_LAT_SHIFT 24 +#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24) +#define L1_ENTRANCE_LAT_SHIFT 27 +#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27) +#define N_FTS_SHIFT 8 +#define N_FTS_MASK GENMASK(7, 0) +#define N_FTS_VAL 52 + +#define PORT_LOGIC_GEN2_CTRL 0x80C +#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17) +#define FTS_MASK GENMASK(7, 0) +#define FTS_VAL 52 + +#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828 + +#define GEN3_EQ_CONTROL_OFF 0x8a8 +#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 +#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) +#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) + +#define GEN3_RELATED_OFF 0x890 +#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0) +#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24) + +#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 +#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 +#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) +#define AMBA_ERROR_RESPONSE_CRS_OKAY 0 +#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 +#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 + +#define PORT_LOGIC_MSIX_DOORBELL 0x948 + +#define CAP_SPCIE_CAP_OFF 0x154 +#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0) +#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8) +#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8 + +#define PME_ACK_TIMEOUT 10000 + +#define LTSSM_TIMEOUT 50000 /* 50ms */ + +#define 
GEN3_GEN4_EQ_PRESET_INIT 5 + +#define GEN1_CORE_CLK_FREQ 62500000 +#define GEN2_CORE_CLK_FREQ 125000000 +#define GEN3_CORE_CLK_FREQ 250000000 +#define GEN4_CORE_CLK_FREQ 500000000 + +static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, + GEN2_CORE_CLK_FREQ, + GEN3_CORE_CLK_FREQ, + GEN4_CORE_CLK_FREQ +}; + +static const u32 event_cntr_ctrl_offset[] = { + 0x1d8, + 0x1a8, + 0x1a8, + 0x1a8, + 0x1c4, + 0x1d8 +}; + +static const u32 event_cntr_data_offset[] = { + 0x1dc, + 0x1ac, + 0x1ac, + 0x1ac, + 0x1c8, + 0x1dc +}; + +struct tegra_pcie_dw { + struct device *dev; + struct resource *appl_res; + struct resource *dbi_res; + struct resource *atu_dma_res; + void __iomem *appl_base; + struct clk *core_clk; + struct reset_control *core_apb_rst; + struct reset_control *core_rst; + struct dw_pcie pci; + struct tegra_bpmp *bpmp; + + bool supports_clkreq; + bool enable_cdm_check; + bool link_state; + bool update_fc_fixup; + u8 init_link_width; + u32 msi_ctrl_int; + u32 num_lanes; + u32 max_speed; + u32 cid; + u32 cfg_link_cap_l1sub; + u32 pcie_cap_base; + u32 aspm_cmrt; + u32 aspm_pwr_on_t; + u32 aspm_l0s_enter_lat; + + struct regulator *pex_ctl_supply; + struct regulator *slot_ctl_3v3; + struct regulator *slot_ctl_12v; + + unsigned int phy_count; + struct phy **phys; + + struct dentry *debugfs; +}; + +static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) +{ + return container_of(pci, struct tegra_pcie_dw, pci); +} + +static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->appl_base + reg); +} + +static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) +{ + return readl_relaxed(pcie->appl_base + reg); +} + +struct tegra_pcie_soc { + enum dw_pcie_device_mode mode; +}; + +static void apply_bad_link_workaround(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 current_link_width; + u16 val; + + /* + * NOTE:- Since this scenario is uncommon and link as such is not + * stable anyway, not waiting to confirm if link is really + * transitioning to Gen-2 speed + */ + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); + if (val & PCI_EXP_LNKSTA_LBMS) { + current_link_width = (val & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + if (pcie->init_link_width > current_link_width) { + dev_warn(pci->dev, "PCIe link is bad, width reduced\n"); + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL2); + val &= ~PCI_EXP_LNKCTL2_TLS; + val |= PCI_EXP_LNKCTL2_TLS_2_5GT; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL2, val); + + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL); + val |= PCI_EXP_LNKCTL_RL; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL, val); + } + } +} + +static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 val, tmp; + u16 val_w; + + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { + appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); + + /* SBR & Surprise Link Down WAR */ + val = appl_readl(pcie, APPL_CAR_RESET_OVRD); + val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; + appl_writel(pcie, val, APPL_CAR_RESET_OVRD); + udelay(1); + val = appl_readl(pcie, 
APPL_CAR_RESET_OVRD); + val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; + appl_writel(pcie, val, APPL_CAR_RESET_OVRD); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + } + } + + if (val & APPL_INTR_STATUS_L0_INT_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); + if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { + appl_writel(pcie, + APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, + APPL_INTR_STATUS_L1_8_0); + apply_bad_link_workaround(pp); + } + if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { + appl_writel(pcie, + APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, + APPL_INTR_STATUS_L1_8_0); + + val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + dev_dbg(pci->dev, "Link Speed: Gen-%u\n", val_w & + PCI_EXP_LNKSTA_CLS); + } + } + + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_18); + tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { + dev_info(pci->dev, "CDM check complete\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; + } + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { + dev_err(pci->dev, "CDM comparison mismatch\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; + } + if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { + dev_err(pci->dev, "CDM Logic error\n"); + tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; + } + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp); + tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); + dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp); + } + + return IRQ_HANDLED; +} + +static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + + return tegra_pcie_rp_irq_handler(pcie); +} + +static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* + * This is an endpoint mode specific register that happens to appear + * even when the controller is operating in root port mode, and the + * system hangs when it is accessed while the link is in ASPM-L1 state. + * So skip accessing it altogether + */ + if (where == PORT_LOGIC_MSIX_DOORBELL) { + *val = 0x00000000; + return PCIBIOS_SUCCESSFUL; + } + + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* + * This is an endpoint mode specific register that happens to appear + * even when the controller is operating in root port mode, and the + * system hangs when it is accessed while the link is in ASPM-L1 state. 
+ * So skip accessing it altogether + */ + if (where == PORT_LOGIC_MSIX_DOORBELL) + return PCIBIOS_SUCCESSFUL; + + return dw_pcie_write(pci->dbi_base + where, size, val); +} + +#if defined(CONFIG_PCIEASPM) +static void disable_aspm_l11(struct tegra_pcie_dw *pcie) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); + val &= ~PCI_L1SS_CAP_ASPM_L1_1; + dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); +} + +static void disable_aspm_l12(struct tegra_pcie_dw *pcie) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); + val &= ~PCI_L1SS_CAP_ASPM_L1_2; + dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); +} + +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) +{ + u32 val; + + val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]); + val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT); + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; + val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); + val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]); + + return val; +} + +static int aspm_state_cnt(struct seq_file *s, void *data) +{ + struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) + dev_get_drvdata(s->private); + u32 val; + + seq_printf(s, "Tx L0s entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S)); + + seq_printf(s, "Rx L0s entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S)); + + seq_printf(s, "Link L1 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1)); + + seq_printf(s, "Link L1.1 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1)); + + seq_printf(s, "Link L1.2 entry count : %u\n", + event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2)); + + /* Clear all counters */ + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], + EVENT_COUNTER_ALL_CLEAR); + + /* Re-enable counting */ + val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); + + return 0; +} + +static void init_host_aspm(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + u32 val; + + val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); + pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; + + /* Enable ASPM counters */ + val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; + val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; + dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val); + + /* Program T_cmrt and T_pwr_on values */ + val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); + val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE); + val |= (pcie->aspm_cmrt << 8); + val |= (pcie->aspm_pwr_on_t << 19); + dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val); + + /* Program L0s and L1 entrance latencies */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~L0S_ENTRANCE_LAT_MASK; + val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT); + val |= ENTER_ASPM; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); +} + +static int init_debugfs(struct tegra_pcie_dw *pcie) +{ + struct dentry *d; + + d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", + pcie->debugfs, aspm_state_cnt); + 
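/* Creation failure here is not fatal; just log it */ + 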
if (IS_ERR_OR_NULL(d)) + dev_err(pcie->dev, + "Failed to create debugfs file \"aspm_state_cnt\"\n"); + + return 0; +} +#else +static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } +static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } +static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } +static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; } +#endif + +static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + u16 val_w; + + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); + val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + + if (pcie->enable_cdm_check) { + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_18); + val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR; + val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR; + appl_writel(pcie, val, APPL_INTR_EN_L1_18); + } + + val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + + PCI_EXP_LNKCTL); + val_w |= PCI_EXP_LNKCTL_LBMIE; + dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL, + val_w); +} + +static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + /* Enable legacy interrupt generation */ + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; + val |= APPL_INTR_EN_L0_0_INT_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_8_0); + val |= APPL_INTR_EN_L1_8_INTX_EN; + val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN; + val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN; + if (IS_ENABLED(CONFIG_PCIEAER)) + val |= APPL_INTR_EN_L1_8_AER_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); +} + +static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + dw_pcie_msi_init(pp); + + /* Enable MSI interrupt generation */ + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN; + val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); +} + +static void tegra_pcie_enable_interrupts(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + /* Clear interrupt statuses before enabling interrupts */ + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, 
APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + + tegra_pcie_enable_system_interrupts(pp); + tegra_pcie_enable_legacy_interrupts(pp); + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_pcie_enable_msi_interrupts(pp); +} + +static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + u32 val, offset, i; + + /* Program init preset */ + for (i = 0; i < pcie->num_lanes; i++) { + dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF + + (i * 2), 2, &val); + val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK; + val |= GEN3_GEN4_EQ_PRESET_INIT; + val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK; + val |= (GEN3_GEN4_EQ_PRESET_INIT << + CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT); + dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF + + (i * 2), 2, val); + + offset = dw_pcie_find_ext_capability(pci, + PCI_EXT_CAP_ID_PL_16GT) + + PCI_PL_16GT_LE_CTRL; + dw_pcie_read(pci->dbi_base + offset + i, 1, &val); + val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK; + val |= GEN3_GEN4_EQ_PRESET_INIT; + val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK; + val |= (GEN3_GEN4_EQ_PRESET_INIT << + PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT); + dw_pcie_write(pci->dbi_base + offset + i, 1, val); + } + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; + val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT); + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; + val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +} + +static void tegra_pcie_prepare_host(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val; + + val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); + val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); + dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); + + val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE); + val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE; + val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE; + dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val); + + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); + + /* Configure FTS */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~(N_FTS_MASK << N_FTS_SHIFT); + val |= N_FTS_VAL << N_FTS_SHIFT; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val &= ~FTS_MASK; + val |= FTS_VAL; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + + /* Enable as 0xFFFF0001 response for CRS */ + val = dw_pcie_readl_dbi(pci, 
PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); + val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); + val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << + AMBA_ERROR_RESPONSE_CRS_SHIFT); + dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); + + /* Configure Max Speed from DT */ + if (pcie->max_speed && pcie->max_speed != -EINVAL) { + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_SLS; + val |= pcie->max_speed; + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, + val); + } + + /* Configure Max lane width from DT */ + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_MLW; + val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); + + config_gen3_gen4_eq_presets(pcie); + + init_host_aspm(pcie); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + if (pcie->update_fc_fixup) { + val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); + val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; + dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); + } + + dw_pcie_setup_rc(pp); + + clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); + + /* Assert RST */ + val = appl_readl(pcie, APPL_PINMUX); + val &= ~APPL_PINMUX_PEX_RST; + appl_writel(pcie, val, APPL_PINMUX); + + usleep_range(100, 200); + + /* Enable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + /* De-assert RST */ + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_PEX_RST; + appl_writel(pcie, val, APPL_PINMUX); + + msleep(100); +} + +static int tegra_pcie_dw_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val, tmp, offset, speed; + + tegra_pcie_prepare_host(pp); + + if (dw_pcie_wait_for_link(pci)) { + /* + * Some endpoints can't get the link up if the root port has + * the Data Link Feature (DLF) enabled. See PCIe spec rev 4.0 + * ver 1.0, sec 3.4.2 & 7.7.4 for more info on Scaled Flow + * Control and DLF. + * So confirm that this is indeed the case here and attempt + * link-up once again with DLF disabled. 
+ */ + val = appl_readl(pcie, APPL_DEBUG); + val &= APPL_DEBUG_LTSSM_STATE_MASK; + val >>= APPL_DEBUG_LTSSM_STATE_SHIFT; + tmp = appl_readl(pcie, APPL_LINK_STATUS); + tmp &= APPL_LINK_STATUS_RDLH_LINK_UP; + if (!(val == 0x11 && !tmp)) { + /* Link is down for legitimate reasons; nothing to retry */ + return 0; + } + + dev_info(pci->dev, "Link is down in DLL\n"); + dev_info(pci->dev, "Trying again with DLFE disabled\n"); + /* Disable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + reset_control_assert(pcie->core_rst); + reset_control_deassert(pcie->core_rst); + + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); + val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); + val &= ~PCI_DLF_EXCHANGE_ENABLE; + dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); + + tegra_pcie_prepare_host(pp); + + if (dw_pcie_wait_for_link(pci)) + return 0; + } + + speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & + PCI_EXP_LNKSTA_CLS; + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + tegra_pcie_enable_interrupts(pp); + + return 0; +} + +static int tegra_pcie_dw_link_up(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); +} + +static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp) +{ + pp->num_vectors = MAX_MSI_IRQS; +} + +static const struct dw_pcie_ops tegra_dw_pcie_ops = { + .link_up = tegra_pcie_dw_link_up, +}; + +static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { + .rd_own_conf = tegra_pcie_dw_rd_own_conf, + .wr_own_conf = tegra_pcie_dw_wr_own_conf, + .host_init = tegra_pcie_dw_host_init, + .set_num_vectors = tegra_pcie_set_msi_vec_num, +}; + +static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) +{ + unsigned int phy_count = pcie->phy_count; + + while (phy_count--) { + phy_power_off(pcie->phys[phy_count]); + phy_exit(pcie->phys[phy_count]); + } +} + +static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) +{ + unsigned int i; + int ret; + + for (i = 0; i < pcie->phy_count; i++) { + ret = phy_init(pcie->phys[i]); + if (ret < 0) + goto phy_power_off; + + ret = phy_power_on(pcie->phys[i]); + if (ret < 0) + goto phy_exit; + } + + return 0; + +phy_power_off: + while (i--) { + phy_power_off(pcie->phys[i]); +phy_exit: + phy_exit(pcie->phys[i]); + } + + return ret; +} + +static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) +{ + struct device_node *np = pcie->dev->of_node; + int ret; + + ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); + if (ret < 0) { + dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); + return ret; + } + + ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", + &pcie->aspm_pwr_on_t); + if (ret < 0) + dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", + ret); + + ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", + &pcie->aspm_l0s_enter_lat); + if (ret < 0) + dev_info(pcie->dev, + "Failed to read ASPM L0s Entrance latency: %d\n", ret); + + ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); + if (ret < 0) { + dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); + return ret; + } + + pcie->max_speed = of_pci_get_max_link_speed(np); + + ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); + if (ret) { + dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); + return ret; + } + + ret = of_property_count_strings(np, "phy-names"); + if (ret < 0) { 
+ dev_err(pcie->dev, "Failed to find PHY entries: %d\n", + ret); + return ret; + } + pcie->phy_count = ret; + + if (of_property_read_bool(np, "nvidia,update-fc-fixup")) + pcie->update_fc_fixup = true; + + pcie->supports_clkreq = + of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); + + pcie->enable_cdm_check = + of_property_read_bool(np, "snps,enable-cdm-check"); + + return 0; +} + +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, + bool enable) +{ + struct mrq_uphy_response resp; + struct tegra_bpmp_message msg; + struct mrq_uphy_request req; + + /* Controller-5 doesn't need to have its state set by BPMP-FW */ + if (pcie->cid == 5) + return 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; + req.controller_state.pcie_controller = pcie->cid; + req.controller_state.enable = enable; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_UPHY; + msg.tx.data = &req; + msg.tx.size = sizeof(req); + msg.rx.data = &resp; + msg.rx.size = sizeof(resp); + + return tegra_bpmp_transfer(pcie->bpmp, &msg); +} + +static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) +{ + struct pcie_port *pp = &pcie->pci.pp; + struct pci_bus *child, *root_bus = NULL; + struct pci_dev *pdev; + + /* + * The link doesn't go into the L2 state with some of the endpoints on + * Tegra if they are not in the D0 state. So make sure that the + * immediate downstream devices are in D0 before sending PME_TurnOff + * to put the link into the L2 state. + * This is as per PCI Express Base r4.0 v1.0, September 27 2017, + * sec 5.2 Link State Power Management (page 428). + */ + + list_for_each_entry(child, &pp->root_bus->children, node) { + /* Find the bus immediately downstream of the root port */ + if (child->parent == pp->root_bus) { + root_bus = child; + break; + } + } + + if (!root_bus) { + dev_err(pcie->dev, "Failed to find downstream devices\n"); + return; + } + + list_for_each_entry(pdev, &root_bus->devices, bus_list) { + if (PCI_SLOT(pdev->devfn) == 0) { + if (pci_set_power_state(pdev, PCI_D0)) + dev_err(pcie->dev, + "Failed to transition %s to D0 state\n", + dev_name(&pdev->dev)); + } + } +} + +static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) +{ + pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); + if (IS_ERR(pcie->slot_ctl_3v3)) { + if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) + return PTR_ERR(pcie->slot_ctl_3v3); + + pcie->slot_ctl_3v3 = NULL; + } + + pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); + if (IS_ERR(pcie->slot_ctl_12v)) { + if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) + return PTR_ERR(pcie->slot_ctl_12v); + + pcie->slot_ctl_12v = NULL; + } + + return 0; +} + +static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) +{ + int ret; + + if (pcie->slot_ctl_3v3) { + ret = regulator_enable(pcie->slot_ctl_3v3); + if (ret < 0) { + dev_err(pcie->dev, + "Failed to enable 3.3V slot supply: %d\n", ret); + return ret; + } + } + + if (pcie->slot_ctl_12v) { + ret = regulator_enable(pcie->slot_ctl_12v); + if (ret < 0) { + dev_err(pcie->dev, + "Failed to enable 12V slot supply: %d\n", ret); + goto fail_12v_enable; + } + } + + /* + * According to PCI Express Card Electromechanical Specification + * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) + * should be a minimum of 100ms. 
+ */ + if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) + msleep(100); + + return 0; + +fail_12v_enable: + if (pcie->slot_ctl_3v3) + regulator_disable(pcie->slot_ctl_3v3); + return ret; +} + +static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) +{ + if (pcie->slot_ctl_12v) + regulator_disable(pcie->slot_ctl_12v); + if (pcie->slot_ctl_3v3) + regulator_disable(pcie->slot_ctl_3v3); +} + +static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, + bool en_hw_hot_rst) +{ + int ret; + u32 val; + + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); + if (ret) { + dev_err(pcie->dev, + "Failed to enable controller %u: %d\n", pcie->cid, ret); + return ret; + } + + ret = tegra_pcie_enable_slot_regulators(pcie); + if (ret < 0) + goto fail_slot_reg_en; + + ret = regulator_enable(pcie->pex_ctl_supply); + if (ret < 0) { + dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); + goto fail_reg_en; + } + + ret = clk_prepare_enable(pcie->core_clk); + if (ret) { + dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); + goto fail_core_clk; + } + + ret = reset_control_deassert(pcie->core_apb_rst); + if (ret) { + dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", + ret); + goto fail_core_apb_rst; + } + + if (en_hw_hot_rst) { + /* Enable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + } + + ret = tegra_pcie_enable_phy(pcie); + if (ret) { + dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); + goto fail_phy; + } + + /* Update CFG base address */ + appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, + APPL_CFG_BASE_ADDR); + + /* Configure this core for RP mode operation */ + appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); + + appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); + + val = appl_readl(pcie, APPL_CTRL); + appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); + + val = appl_readl(pcie, APPL_CFG_MISC); + val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); + appl_writel(pcie, val, APPL_CFG_MISC); + + if (!pcie->supports_clkreq) { + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN; + val |= APPL_PINMUX_CLKREQ_OUT_OVRD; + appl_writel(pcie, val, APPL_PINMUX); + } + + /* Update iATU_DMA base address */ + appl_writel(pcie, + pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, + APPL_CFG_IATU_DMA_BASE_ADDR); + + reset_control_deassert(pcie->core_rst); + + pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, + PCI_CAP_ID_EXP); + + /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */ + if (!pcie->supports_clkreq) { + disable_aspm_l11(pcie); + disable_aspm_l12(pcie); + } + + return ret; + +fail_phy: + reset_control_assert(pcie->core_apb_rst); +fail_core_apb_rst: + clk_disable_unprepare(pcie->core_clk); +fail_core_clk: + regulator_disable(pcie->pex_ctl_supply); +fail_reg_en: + tegra_pcie_disable_slot_regulators(pcie); +fail_slot_reg_en: + tegra_pcie_bpmp_set_ctrl_state(pcie, false); + + return ret; +} + +static int __deinit_controller(struct tegra_pcie_dw *pcie) +{ + int ret; + + ret = reset_control_assert(pcie->core_rst); + if (ret) { + dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", + ret); + return ret; + } + + tegra_pcie_disable_phy(pcie); + + ret = reset_control_assert(pcie->core_apb_rst); + if (ret) { + dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); + return ret; + } + + 
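/* With the PHY off and resets asserted, drop the clock and supplies */ + 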
clk_disable_unprepare(pcie->core_clk); + + ret = regulator_disable(pcie->pex_ctl_supply); + if (ret) { + dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); + return ret; + } + + tegra_pcie_disable_slot_regulators(pcie); + + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); + if (ret) { + dev_err(pcie->dev, "Failed to disable controller %d: %d\n", + pcie->cid, ret); + return ret; + } + + return ret; +} + +static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct pcie_port *pp = &pci->pp; + int ret; + + ret = tegra_pcie_config_controller(pcie, false); + if (ret < 0) + return ret; + + pp->ops = &tegra_pcie_dw_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret < 0) { + dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); + goto fail_host_init; + } + + return 0; + +fail_host_init: + return __deinit_controller(pcie); +} + +static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) +{ + u32 val; + + if (!tegra_pcie_dw_link_up(&pcie->pci)) + return 0; + + val = appl_readl(pcie, APPL_RADM_STATUS); + val |= APPL_PM_XMT_TURNOFF_STATE; + appl_writel(pcie, val, APPL_RADM_STATUS); + + return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val, + val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, + 1, PME_ACK_TIMEOUT); +} + +static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) +{ + u32 data; + int err; + + if (!tegra_pcie_dw_link_up(&pcie->pci)) { + dev_dbg(pcie->dev, "PCIe link is not up...!\n"); + return; + } + + if (tegra_pcie_try_link_l2(pcie)) { + dev_info(pcie->dev, "Link didn't transition to L2 state\n"); + /* + * TX lane clock freq will reset to Gen1 only if link is in L2 + * or detect state. + * So apply pex_rst to end point to force RP to go into detect + * state + */ + data = appl_readl(pcie, APPL_PINMUX); + data &= ~APPL_PINMUX_PEX_RST; + appl_writel(pcie, data, APPL_PINMUX); + + err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, + data, + ((data & + APPL_DEBUG_LTSSM_STATE_MASK) >> + APPL_DEBUG_LTSSM_STATE_SHIFT) == + LTSSM_STATE_PRE_DETECT, + 1, LTSSM_TIMEOUT); + if (err) { + dev_info(pcie->dev, "Link didn't go to detect state\n"); + } else { + /* Disable LTSSM after link is in detect state */ + data = appl_readl(pcie, APPL_CTRL); + data &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, data, APPL_CTRL); + } + } + /* + * DBI registers may not be accessible after this as PLL-E would be + * down depending on how CLKREQ is pulled by end point + */ + data = appl_readl(pcie, APPL_PINMUX); + data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); + /* Cut REFCLK to slot */ + data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; + data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; + appl_writel(pcie, data, APPL_PINMUX); +} + +static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) +{ + tegra_pcie_downstream_dev_to_D0(pcie); + dw_pcie_host_deinit(&pcie->pci.pp); + tegra_pcie_dw_pme_turnoff(pcie); + + return __deinit_controller(pcie); +} + +static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) +{ + struct pcie_port *pp = &pcie->pci.pp; + struct device *dev = pcie->dev; + char *name; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = of_irq_get_byname(dev->of_node, "msi"); + if (!pp->msi_irq) { + dev_err(dev, "Failed to get MSI interrupt\n"); + return -ENODEV; + } + } + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", + ret); + goto fail_pm_get_sync; + } + + ret = 
pinctrl_pm_select_default_state(dev); + if (ret < 0) { + dev_err(dev, "Failed to configure sideband pins: %d\n", ret); + goto fail_pinctrl; + } + + tegra_pcie_init_controller(pcie); + + pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); + if (!pcie->link_state) { + ret = -ENOMEDIUM; + goto fail_host_init; + } + + name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!name) { + ret = -ENOMEM; + goto fail_host_init; + } + + pcie->debugfs = debugfs_create_dir(name, NULL); + if (!pcie->debugfs) + dev_err(dev, "Failed to create debugfs\n"); + else + init_debugfs(pcie); + + return ret; + +fail_host_init: + tegra_pcie_deinit_controller(pcie); +fail_pinctrl: + pm_runtime_put_sync(dev); +fail_pm_get_sync: + pm_runtime_disable(dev); + return ret; +} + +static int tegra_pcie_dw_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *atu_dma_res; + struct tegra_pcie_dw *pcie; + struct resource *dbi_res; + struct pcie_port *pp; + struct dw_pcie *pci; + struct phy **phys; + char *name; + int ret; + u32 i; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = &pcie->pci; + pci->dev = &pdev->dev; + pci->ops = &tegra_dw_pcie_ops; + pp = &pci->pp; + pcie->dev = &pdev->dev; + + ret = tegra_pcie_dw_parse_dt(pcie); + if (ret < 0) { + dev_err(dev, "Failed to parse device tree: %d\n", ret); + return ret; + } + + ret = tegra_pcie_get_slot_regulators(pcie); + if (ret < 0) { + dev_err(dev, "Failed to get slot regulators: %d\n", ret); + return ret; + } + + pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); + if (IS_ERR(pcie->pex_ctl_supply)) { + ret = PTR_ERR(pcie->pex_ctl_supply); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get regulator: %ld\n", + PTR_ERR(pcie->pex_ctl_supply)); + return ret; + } + + pcie->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(pcie->core_clk)) { + dev_err(dev, "Failed to get core clock: %ld\n", + PTR_ERR(pcie->core_clk)); + return PTR_ERR(pcie->core_clk); + } + + pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "appl"); + if (!pcie->appl_res) { + dev_err(dev, "Failed to find \"appl\" region\n"); + return -ENODEV; + } + + pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); + if (IS_ERR(pcie->appl_base)) + return PTR_ERR(pcie->appl_base); + + pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); + if (IS_ERR(pcie->core_apb_rst)) { + dev_err(dev, "Failed to get APB reset: %ld\n", + PTR_ERR(pcie->core_apb_rst)); + return PTR_ERR(pcie->core_apb_rst); + } + + phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); + if (!phys) + return -ENOMEM; + + for (i = 0; i < pcie->phy_count; i++) { + name = kasprintf(GFP_KERNEL, "p2u-%u", i); + if (!name) { + dev_err(dev, "Failed to create P2U string\n"); + return -ENOMEM; + } + phys[i] = devm_phy_get(dev, name); + kfree(name); + if (IS_ERR(phys[i])) { + ret = PTR_ERR(phys[i]); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get PHY: %d\n", ret); + return ret; + } + } + + pcie->phys = phys; + + dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + if (!dbi_res) { + dev_err(dev, "Failed to find \"dbi\" region\n"); + return -ENODEV; + } + pcie->dbi_res = dbi_res; + + pci->dbi_base = devm_ioremap_resource(dev, dbi_res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* Tegra HW locates DBI2 at a fixed offset from DBI */ + pci->dbi_base2 = pci->dbi_base + 0x1000; + + atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "atu_dma"); + 
if (!atu_dma_res) { + dev_err(dev, "Failed to find \"atu_dma\" region\n"); + return -ENODEV; + } + pcie->atu_dma_res = atu_dma_res; + + pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); + if (IS_ERR(pci->atu_base)) + return PTR_ERR(pci->atu_base); + + pcie->core_rst = devm_reset_control_get(dev, "core"); + if (IS_ERR(pcie->core_rst)) { + dev_err(dev, "Failed to get core reset: %ld\n", + PTR_ERR(pcie->core_rst)); + return PTR_ERR(pcie->core_rst); + } + + pp->irq = platform_get_irq_byname(pdev, "intr"); + if (!pp->irq) { + dev_err(dev, "Failed to get \"intr\" interrupt\n"); + return -ENODEV; + } + + ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler, + IRQF_SHARED, "tegra-pcie-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); + return ret; + } + + pcie->bpmp = tegra_bpmp_get(dev); + if (IS_ERR(pcie->bpmp)) + return PTR_ERR(pcie->bpmp); + + platform_set_drvdata(pdev, pcie); + + ret = tegra_pcie_config_rp(pcie); + if (ret && ret != -ENOMEDIUM) + goto fail; + else + return 0; + +fail: + tegra_bpmp_put(pcie->bpmp); + return ret; +} + +static int tegra_pcie_dw_remove(struct platform_device *pdev) +{ + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + + if (!pcie->link_state) + return 0; + + debugfs_remove_recursive(pcie->debugfs); + tegra_pcie_deinit_controller(pcie); + pm_runtime_put_sync(pcie->dev); + pm_runtime_disable(pcie->dev); + tegra_bpmp_put(pcie->bpmp); + + return 0; +} + +static int tegra_pcie_dw_suspend_late(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + u32 val; + + if (!pcie->link_state) + return 0; + + /* Enable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + return 0; +} + +static int tegra_pcie_dw_suspend_noirq(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + + if (!pcie->link_state) + return 0; + + /* Save MSI interrupt vector */ + pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci, + PORT_LOGIC_MSI_CTRL_INT_0_EN); + tegra_pcie_downstream_dev_to_D0(pcie); + tegra_pcie_dw_pme_turnoff(pcie); + + return __deinit_controller(pcie); +} + +static int tegra_pcie_dw_resume_noirq(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + int ret; + + if (!pcie->link_state) + return 0; + + ret = tegra_pcie_config_controller(pcie, true); + if (ret < 0) + return ret; + + ret = tegra_pcie_dw_host_init(&pcie->pci.pp); + if (ret < 0) { + dev_err(dev, "Failed to init host: %d\n", ret); + goto fail_host_init; + } + + /* Restore MSI interrupt vector */ + dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN, + pcie->msi_ctrl_int); + + return 0; + +fail_host_init: + return __deinit_controller(pcie); +} + +static int tegra_pcie_dw_resume_early(struct device *dev) +{ + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); + u32 val; + + if (!pcie->link_state) + return 0; + + /* Disable HW_HOT_RST mode */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT; + val &= ~APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + return 0; +} + +static void tegra_pcie_dw_shutdown(struct platform_device *pdev) +{ + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); + + if (!pcie->link_state) + return; + + debugfs_remove_recursive(pcie->debugfs); 
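+ /* Bring downstream devices to D0 so the link can enter L2 for PME_TurnOff */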
+ tegra_pcie_downstream_dev_to_D0(pcie); + + disable_irq(pcie->pci.pp.irq); + if (IS_ENABLED(CONFIG_PCI_MSI)) + disable_irq(pcie->pci.pp.msi_irq); + + tegra_pcie_dw_pme_turnoff(pcie); + __deinit_controller(pcie); +} + +static const struct of_device_id tegra_pcie_dw_of_match[] = { + { + .compatible = "nvidia,tegra194-pcie", + }, + {}, +}; + +static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { + .suspend_late = tegra_pcie_dw_suspend_late, + .suspend_noirq = tegra_pcie_dw_suspend_noirq, + .resume_noirq = tegra_pcie_dw_resume_noirq, + .resume_early = tegra_pcie_dw_resume_early, +}; + +static struct platform_driver tegra_pcie_dw_driver = { + .probe = tegra_pcie_dw_probe, + .remove = tegra_pcie_dw_remove, + .shutdown = tegra_pcie_dw_shutdown, + .driver = { + .name = "tegra194-pcie", + .pm = &tegra_pcie_dw_pm_ops, + .of_match_table = tegra_pcie_dw_of_match, + }, +}; +module_platform_driver(tegra_pcie_dw_driver); + +MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); + +MODULE_AUTHOR("Vidya Sagar <[email protected]>"); +MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index d5dc40289cce..3f30ee4a00b3 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -270,6 +270,7 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); struct device_node *np = pci->dev->of_node; struct device_node *np_intc; + int ret = 0; np_intc = of_get_child_by_name(np, "legacy-interrupt-controller"); if (!np_intc) { @@ -280,20 +281,24 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) pp->irq = irq_of_parse_and_map(np_intc, 0); if (!pp->irq) { dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n"); - return -EINVAL; + ret = -EINVAL; + goto out_put_node; } priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); if (!priv->legacy_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); - return -ENODEV; + ret = -ENODEV; + goto out_put_node; } irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler, pp); - return 0; +out_put_node: + of_node_put(np_intc); + return ret; } static int uniphier_pcie_host_init(struct pcie_port *pp) diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index eb58dfdaba1b..fc0fe4d4de49 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -308,7 +308,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); - /* Unmask all MSI's */ + /* Unmask all MSIs */ advk_writel(pcie, 0, PCIE_MSI_MASK_REG); /* Enable summary interrupt for GIC SPI source */ @@ -794,6 +794,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; struct irq_chip *irq_chip; + int ret = 0; pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { @@ -806,8 +807,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", dev_name(dev)); if (!irq_chip->name) { - of_node_put(pcie_intc_node); - return -ENOMEM; + ret = -ENOMEM; + goto out_put_node; } irq_chip->irq_mask = advk_pcie_irq_mask; @@ -819,11 +820,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) 
&advk_pcie_irq_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - of_node_put(pcie_intc_node); - return -ENOMEM; + ret = -ENOMEM; + goto out_put_node; } - return 0; +out_put_node: + of_node_put(pcie_intc_node); + return ret; } static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index c742881b5061..c8cb9c5188a4 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -43,9 +43,8 @@ static struct pci_config_window *gen_pci_init(struct device *dev, goto err_out; } - err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); + err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg); if (err) { - gen_pci_unmap_cfg(cfg); goto err_out; } return cfg; diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index dea3ec7592a2..75a2fb930d4b 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Simple, generic PCI host controller driver targetting firmware-initialised + * Simple, generic PCI host controller driver targeting firmware-initialised * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). * * Copyright (C) 2014 ARM Limited diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c new file mode 100644 index 000000000000..cc96be450360 --- /dev/null +++ b/drivers/pci/controller/pci-hyperv-intf.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Microsoft Corporation. + * + * Author: + * Haiyang Zhang <[email protected]> + * + * This small module is a helper driver that allows other drivers to + * have a common interface with the Hyper-V PCI frontend driver. 
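+ * + * For illustration only (editor's sketch, not part of the original + * text): once the Hyper-V PCI frontend has populated hvpci_block_ops, + * a VF driver might read a hypothetical config block 0 like this: + * + *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX]; + *	unsigned int bytes_returned; + *	int err = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0, + *				      &bytes_returned); + * + * Until the frontend loads and fills in hvpci_block_ops, these + * wrappers return -EOPNOTSUPP.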
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hyperv.h> + +struct hyperv_pci_block_ops hvpci_block_ops; +EXPORT_SYMBOL_GPL(hvpci_block_ops); + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned) +{ + if (!hvpci_block_ops.read_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.read_block(dev, buf, buf_len, block_id, + bytes_returned); +} +EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk); + +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id) +{ + if (!hvpci_block_ops.write_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.write_block(dev, buf, len, block_id); +} +EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk); + +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + if (!hvpci_block_ops.reg_blk_invalidate) + return -EOPNOTSUPP; + + return hvpci_block_ops.reg_blk_invalidate(dev, context, + block_invalidate); +} +EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate); + +static void __exit exit_hv_pci_intf(void) +{ +} + +static int __init init_hv_pci_intf(void) +{ + return 0; +} + +module_init(init_hv_pci_intf); +module_exit(exit_hv_pci_intf); + +MODULE_DESCRIPTION("Hyper-V PCI Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 95441a35eceb..f1f300218fab 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -365,6 +365,39 @@ struct pci_delete_interrupt { struct tran_int_desc int_desc; } __packed; +/* + * Note: the VM must pass a valid block id, wslot and bytes_requested. + */ +struct pci_read_block { + struct pci_message message_type; + u32 block_id; + union win_slot_encoding wslot; + u32 bytes_requested; +} __packed; + +struct pci_read_block_response { + struct vmpacket_descriptor hdr; + u32 status; + u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; +} __packed; + +/* + * Note: the VM must pass a valid block id, wslot and byte_count. + */ +struct pci_write_block { + struct pci_message message_type; + u32 block_id; + union win_slot_encoding wslot; + u32 byte_count; + u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; +} __packed; + +struct pci_dev_inval_block { + struct pci_incoming_message incoming; + union win_slot_encoding wslot; + u64 block_mask; +} __packed; + struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; @@ -499,6 +532,9 @@ struct hv_pci_dev { struct hv_pcibus_device *hbus; struct work_struct wrk; + void (*block_invalidate)(void *context, u64 block_mask); + void *invalidate_context; + /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. @@ -817,6 +853,253 @@ static struct pci_ops hv_pcifront_ops = { .write = hv_pcifront_write_config, }; +/* + * Paravirtual backchannel + * + * Hyper-V SR-IOV provides a backchannel mechanism in software for + * communication between a VF driver and a PF driver. These + * "configuration blocks" are similar in concept to PCI configuration space, + * but instead of doing reads and writes in 32-bit chunks through a very slow + * path, packets of up to 128 bytes can be sent or received asynchronously. + * + * Nearly every SR-IOV device contains just such a communications channel in + * hardware, so using this one in software is usually optional. 
Using the + * software channel, however, allows driver implementers to leverage software + * tools that fuzz the communications channel looking for vulnerabilities. + * + * The usage model for these packets puts the responsibility for reading or + * writing on the VF driver. The VF driver sends a read or a write packet, + * indicating which "block" is being referred to by number. + * + * If the PF driver wishes to initiate communication, it can "invalidate" one or + * more of the first 64 blocks. This invalidation is delivered via a callback + * which the VF driver supplies through this driver. + * + * No protocol is implied, except that supplied by the PF and VF drivers. + */ + +struct hv_read_config_compl { + struct hv_pci_compl comp_pkt; + void *buf; + unsigned int len; + unsigned int bytes_returned; +}; + +/** + * hv_pci_read_config_compl() - Invoked when a response packet + * for a read config block operation arrives. + * @context: Identifies the read config operation + * @resp: The response packet itself + * @resp_packet_size: Size in bytes of the response packet + */ +static void hv_pci_read_config_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct hv_read_config_compl *comp = context; + struct pci_read_block_response *read_resp = + (struct pci_read_block_response *)resp; + unsigned int data_len, hdr_len; + + hdr_len = offsetof(struct pci_read_block_response, bytes); + if (resp_packet_size < hdr_len) { + comp->comp_pkt.completion_status = -1; + goto out; + } + + data_len = resp_packet_size - hdr_len; + if (data_len > 0 && read_resp->status == 0) { + comp->bytes_returned = min(comp->len, data_len); + memcpy(comp->buf, read_resp->bytes, comp->bytes_returned); + } else { + comp->bytes_returned = 0; + } + + comp->comp_pkt.completion_status = read_resp->status; +out: + complete(&comp->comp_pkt.host_event); +} + +/** + * hv_read_config_block() - Sends a read config block request to + * the back-end driver running in the Hyper-V parent partition. + * @pdev: The PCI driver's representation for this device. + * @buf: Buffer into which the config block will be copied. + * @len: Size in bytes of buf. + * @block_id: Identifies the config block which has been requested. + * @bytes_returned: Size which came back from the back-end driver. 
+ * + * Return: 0 on success, -errno on failure + */ +int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, + unsigned int block_id, unsigned int *bytes_returned) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct { + struct pci_packet pkt; + char buf[sizeof(struct pci_read_block)]; + } pkt; + struct hv_read_config_compl comp_pkt; + struct pci_read_block *read_blk; + int ret; + + if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + init_completion(&comp_pkt.comp_pkt.host_event); + comp_pkt.buf = buf; + comp_pkt.len = len; + + memset(&pkt, 0, sizeof(pkt)); + pkt.pkt.completion_func = hv_pci_read_config_compl; + pkt.pkt.compl_ctxt = &comp_pkt; + read_blk = (struct pci_read_block *)&pkt.pkt.message; + read_blk->message_type.type = PCI_READ_BLOCK; + read_blk->wslot.slot = devfn_to_wslot(pdev->devfn); + read_blk->block_id = block_id; + read_blk->bytes_requested = len; + + ret = vmbus_sendpacket(hbus->hdev->channel, read_blk, + sizeof(*read_blk), (unsigned long)&pkt.pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) + return ret; + + ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event); + if (ret) + return ret; + + if (comp_pkt.comp_pkt.completion_status != 0 || + comp_pkt.bytes_returned == 0) { + dev_err(&hbus->hdev->device, + "Read Config Block failed: 0x%x, bytes_returned=%d\n", + comp_pkt.comp_pkt.completion_status, + comp_pkt.bytes_returned); + return -EIO; + } + + *bytes_returned = comp_pkt.bytes_returned; + return 0; +} + +/** + * hv_pci_write_config_compl() - Invoked when a response packet for a write + * config block operation arrives. + * @context: Identifies the write config operation + * @resp: The response packet itself + * @resp_packet_size: Size in bytes of the response packet + */ +static void hv_pci_write_config_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct hv_pci_compl *comp_pkt = context; + + comp_pkt->completion_status = resp->status; + complete(&comp_pkt->host_event); +} + +/** + * hv_write_config_block() - Sends a write config block request to the + * back-end driver running in the Hyper-V parent partition. + * @pdev: The PCI driver's representation for this device. + * @buf: Buffer from which the config block will be copied. + * @len: Size in bytes of buf. + * @block_id: Identifies the config block which is being written. 
+ * + * Return: 0 on success, -errno on failure + */ +int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, + unsigned int block_id) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct { + struct pci_packet pkt; + char buf[sizeof(struct pci_write_block)]; + u32 reserved; + } pkt; + struct hv_pci_compl comp_pkt; + struct pci_write_block *write_blk; + u32 pkt_size; + int ret; + + if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + init_completion(&comp_pkt.host_event); + + memset(&pkt, 0, sizeof(pkt)); + pkt.pkt.completion_func = hv_pci_write_config_compl; + pkt.pkt.compl_ctxt = &comp_pkt; + write_blk = (struct pci_write_block *)&pkt.pkt.message; + write_blk->message_type.type = PCI_WRITE_BLOCK; + write_blk->wslot.slot = devfn_to_wslot(pdev->devfn); + write_blk->block_id = block_id; + write_blk->byte_count = len; + memcpy(write_blk->bytes, buf, len); + pkt_size = offsetof(struct pci_write_block, bytes) + len; + /* + * This quirk is required on some hosts shipped around 2018, because + * these hosts don't check the pkt_size correctly (new hosts have been + * fixed since early 2019). The quirk is also safe on very old hosts + * and new hosts, because, on them, what really matters is the length + * specified in write_blk->byte_count. + */ + pkt_size += sizeof(pkt.reserved); + + ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size, + (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) + return ret; + + ret = wait_for_response(hbus->hdev, &comp_pkt.host_event); + if (ret) + return ret; + + if (comp_pkt.completion_status != 0) { + dev_err(&hbus->hdev->device, + "Write Config Block failed: 0x%x\n", + comp_pkt.completion_status); + return -EIO; + } + + return 0; +} + +/** + * hv_register_block_invalidate() - Invoked when a config block invalidation + * arrives from the back-end driver. + * @pdev: The PCI driver's representation for this device. + * @context: Identifies the device. + * @block_invalidate: Identifies all of the blocks being invalidated. + * + * Return: 0 on success, -errno on failure + */ +int hv_register_block_invalidate(struct pci_dev *pdev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct hv_pci_dev *hpdev; + + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) + return -ENODEV; + + hpdev->block_invalidate = block_invalidate; + hpdev->invalidate_context = context; + + put_pcichild(hpdev); + return 0; + +} + /* Interrupt management hooks */ static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct tran_int_desc *int_desc) @@ -1486,6 +1769,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) } } +/* + * Remove entries in sysfs pci slot directory. 
+ */ +static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (!hpdev->pci_slot) + continue; + pci_destroy_slot(hpdev->pci_slot); + hpdev->pci_slot = NULL; + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1761,6 +2059,10 @@ static void pci_devices_present_work(struct work_struct *work) hpdev = list_first_entry(&removed, struct hv_pci_dev, list_entry); list_del(&hpdev->list_entry); + + if (hpdev->pci_slot) + pci_destroy_slot(hpdev->pci_slot); + put_pcichild(hpdev); } @@ -1856,6 +2158,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, static void hv_eject_device_work(struct work_struct *work) { struct pci_eject_response *ejct_pkt; + struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; unsigned long flags; @@ -1866,6 +2169,7 @@ static void hv_eject_device_work(struct work_struct *work) } ctxt; hpdev = container_of(work, struct hv_pci_dev, wrk); + hbus = hpdev->hbus; WARN_ON(hpdev->state != hv_pcichild_ejecting); @@ -1876,8 +2180,7 @@ static void hv_eject_device_work(struct work_struct *work) * because hbus->pci_bus may not exist yet. */ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); - pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, - wslot); + pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot); if (pdev) { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); @@ -1885,9 +2188,9 @@ static void hv_eject_device_work(struct work_struct *work) pci_unlock_rescan_remove(); } - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); + spin_lock_irqsave(&hbus->device_list_lock, flags); list_del(&hpdev->list_entry); - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); @@ -1896,13 +2199,18 @@ static void hv_eject_device_work(struct work_struct *work) ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; - vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, + vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); + /* For the get_pcichild() in hv_pci_eject_device() */ + put_pcichild(hpdev); + /* For the two refs got in new_pcichild_device() */ put_pcichild(hpdev); put_pcichild(hpdev); - put_hvpcibus(hpdev->hbus); + /* hpdev has been freed. Do not use it any more. 
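+	 * All three put_pcichild() calls above may drop the last reference;
+	 * the local hbus pointer captured at the top of this function is
+	 * what keeps the final put_hvpcibus() below safe.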
*/ + + put_hvpcibus(hbus); } /** @@ -1943,6 +2251,7 @@ static void hv_pci_onchannelcallback(void *context) struct pci_response *response; struct pci_incoming_message *new_message; struct pci_bus_relations *bus_rel; + struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; @@ -2020,6 +2329,21 @@ static void hv_pci_onchannelcallback(void *context) } break; + case PCI_INVALIDATE_BLOCK: + + inval = (struct pci_dev_inval_block *)buffer; + hpdev = get_pcichild_wslot(hbus, + inval->wslot.slot); + if (hpdev) { + if (hpdev->block_invalidate) { + hpdev->block_invalidate( + hpdev->invalidate_context, + inval->block_mask); + } + put_pcichild(hpdev); + } + break; + default: dev_warn(&hbus->hdev->device, "Unimplemented protocol message %x\n", @@ -2485,6 +2809,48 @@ static void put_hvpcibus(struct hv_pcibus_device *hbus) complete(&hbus->remove_event); } +#define HVPCI_DOM_MAP_SIZE (64 * 1024) +static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE); + +/* + * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0 + * as invalid for passthrough PCI devices of this driver. + */ +#define HVPCI_DOM_INVALID 0 + +/** + * hv_get_dom_num() - Get a valid PCI domain number + * Check if the PCI domain number is in use, and return another number if + * it is in use. + * + * @dom: Requested domain number + * + * return: domain number on success, HVPCI_DOM_INVALID on failure + */ +static u16 hv_get_dom_num(u16 dom) +{ + unsigned int i; + + if (test_and_set_bit(dom, hvpci_dom_map) == 0) + return dom; + + for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) { + if (test_and_set_bit(i, hvpci_dom_map) == 0) + return i; + } + + return HVPCI_DOM_INVALID; +} + +/** + * hv_put_dom_num() - Mark the PCI domain number as free + * @dom: Domain number to be freed + */ +static void hv_put_dom_num(u16 dom) +{ + clear_bit(dom, hvpci_dom_map); +} + /** * hv_pci_probe() - New VMBus channel probe, for a root PCI bus * @hdev: VMBus's tracking struct for this root PCI bus @@ -2496,6 +2862,8 @@ static int hv_pci_probe(struct hv_device *hdev, const struct hv_vmbus_device_id *dev_id) { struct hv_pcibus_device *hbus; + u16 dom_req, dom; + char *name; int ret; /* @@ -2510,19 +2878,34 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->state = hv_pcibus_init; /* - * The PCI bus "domain" is what is called "segment" in ACPI and - * other specs. Pull it from the instance ID, to get something - * unique. Bytes 8 and 9 are what is used in Windows guests, so - * do the same thing for consistency. Note that, since this code - * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee - * that (1) the only domain in use for something that looks like - * a physical PCI bus (which is actually emulated by the - * hypervisor) is domain 0 and (2) there will be no overlap - * between domains derived from these instance IDs in the same - * VM. + * The PCI bus "domain" is what is called "segment" in ACPI and other + * specs. Pull it from the instance ID, to get something usually + * unique. In rare cases of collision, we will find out another number + * not in use. + * + * Note that, since this code only runs in a Hyper-V VM, Hyper-V + * together with this guest driver can guarantee that (1) The only + * domain used by Gen1 VMs for something that looks like a physical + * PCI bus (which is actually emulated by the hypervisor) is domain 0. + * (2) There will be no overlap between domains (after fixing possible + * collisions) in the same VM. 
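+ *
+ * As a worked example (instance GUID bytes hypothetical): for
+ * dev_instance.b[4] == 0x34 and b[5] == 0x12, the requested domain is
+ *
+ *	dom_req = 0x12 << 8 | 0x34 == 0x1234
+ *
+ * and hv_get_dom_num(0x1234) returns 0x1234 unless that bit is already
+ * taken in hvpci_dom_map, in which case the first clear bit becomes the
+ * domain instead.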
*/ - hbus->sysdata.domain = hdev->dev_instance.b[9] | - hdev->dev_instance.b[8] << 8; + dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4]; + dom = hv_get_dom_num(dom_req); + + if (dom == HVPCI_DOM_INVALID) { + dev_err(&hdev->device, + "Unable to use dom# 0x%hx or other numbers", dom_req); + ret = -EINVAL; + goto free_bus; + } + + if (dom != dom_req) + dev_info(&hdev->device, + "PCI dom# 0x%hx has collision, using 0x%hx", + dom_req, dom); + + hbus->sysdata.domain = dom; hbus->hdev = hdev; refcount_set(&hbus->remove_lock, 1); @@ -2537,7 +2920,7 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->sysdata.domain); if (!hbus->wq) { ret = -ENOMEM; - goto free_bus; + goto free_dom; } ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, @@ -2564,7 +2947,14 @@ static int hv_pci_probe(struct hv_device *hdev, goto free_config; } - hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus); + name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance); + if (!name) { + ret = -ENOMEM; + goto unmap; + } + + hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name); + kfree(name); if (!hbus->sysdata.fwnode) { ret = -ENOMEM; goto unmap; @@ -2614,6 +3004,8 @@ close: vmbus_close(hdev->channel); destroy_wq: destroy_workqueue(hbus->wq); +free_dom: + hv_put_dom_num(hbus->sysdata.domain); free_bus: free_page((unsigned long)hbus); return ret; @@ -2676,6 +3068,7 @@ static int hv_pci_remove(struct hv_device *hdev) /* Remove the bus from PCI's point of view. */ pci_lock_rescan_remove(); pci_stop_root_bus(hbus->pci_bus); + hv_pci_remove_slots(hbus); pci_remove_root_bus(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_removed; @@ -2694,6 +3087,9 @@ static int hv_pci_remove(struct hv_device *hdev) put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); destroy_workqueue(hbus->wq); + + hv_put_dom_num(hbus->sysdata.domain); + free_page((unsigned long)hbus); return 0; } @@ -2717,10 +3113,22 @@ static struct hv_driver hv_pci_drv = { static void __exit exit_hv_pci_drv(void) { vmbus_driver_unregister(&hv_pci_drv); + + hvpci_block_ops.read_block = NULL; + hvpci_block_ops.write_block = NULL; + hvpci_block_ops.reg_blk_invalidate = NULL; } static int __init init_hv_pci_drv(void) { + /* Set the invalid domain number's bit, so it will not be used */ + set_bit(HVPCI_DOM_INVALID, hvpci_dom_map); + + /* Initialize PCI block r/w interface */ + hvpci_block_ops.read_block = hv_read_config_block; + hvpci_block_ops.write_block = hv_write_config_block; + hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate; + return vmbus_driver_register(&hv_pci_drv); } diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index f4f53d092e00..673a1725ef38 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> @@ -30,6 +31,7 @@ #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/sizes.h> @@ -95,7 +97,8 @@ #define AFI_MSI_EN_VEC7 0xa8 #define AFI_CONFIGURATION 0xac -#define AFI_CONFIGURATION_EN_FPCI (1 << 0) +#define AFI_CONFIGURATION_EN_FPCI (1 << 0) +#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31) #define AFI_FPCI_ERROR_MASKS 0xb0 @@ -159,13 +162,14 @@ #define 
AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) +#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29)) +#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29) #define AFI_FUSE 0x104 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) #define AFI_PEX0_CTRL 0x110 #define AFI_PEX1_CTRL 0x118 -#define AFI_PEX2_CTRL 0x128 #define AFI_PEX_CTRL_RST (1 << 0) #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) @@ -177,20 +181,74 @@ #define AFI_PEXBIAS_CTRL_0 0x168 +#define RP_PRIV_XP_DL 0x00000494 +#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) + +#define RP_RX_HDR_LIMIT 0x00000e00 +#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) +#define RP_RX_HDR_LIMIT_PW (0x0e << 8) + +#define RP_ECTL_2_R1 0x00000e84 +#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff + +#define RP_ECTL_4_R1 0x00000e8c +#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16) +#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16 + +#define RP_ECTL_5_R1 0x00000e90 +#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff + +#define RP_ECTL_6_R1 0x00000e94 +#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff + +#define RP_ECTL_2_R2 0x00000ea4 +#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff + +#define RP_ECTL_4_R2 0x00000eac +#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16) +#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16 + +#define RP_ECTL_5_R2 0x00000eb0 +#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff + +#define RP_ECTL_6_R2 0x00000eb4 +#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff + #define RP_VEND_XP 0x00000f00 -#define RP_VEND_XP_DL_UP (1 << 30) +#define RP_VEND_XP_DL_UP (1 << 30) +#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27) +#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28) +#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18) + +#define RP_VEND_CTL0 0x00000f44 +#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12) +#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12) + +#define RP_VEND_CTL1 0x00000f48 +#define RP_VEND_CTL1_ERPT (1 << 13) + +#define RP_VEND_XP_BIST 0x00000f4c +#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28) #define RP_VEND_CTL2 0x00000fa8 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7) #define RP_PRIV_MISC 0x00000fe0 -#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) -#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) +#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16) +#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16) +#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31) #define RP_LINK_CONTROL_STATUS 0x00000090 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 +#define RP_LINK_CONTROL_STATUS_2 0x000000b0 + #define PADS_CTL_SEL 0x0000009c #define PADS_CTL 0x000000a0 @@ -226,14 +284,15 @@ #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ #define PME_ACK_TIMEOUT 10000 +#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */ struct tegra_msi { struct msi_controller chip; DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; - unsigned long pages; struct mutex lock; - u64 phys; + void *virt; + dma_addr_t phys; int irq; }; @@ -249,10 +308,12 @@ struct tegra_pcie_soc { unsigned int 
num_ports; const struct tegra_pcie_port_soc *ports; unsigned int msi_base_shift; + unsigned long afi_pex2_ctrl; u32 pads_pll_ctl; u32 tx_ref_sel; u32 pads_refclk_cfg0; u32 pads_refclk_cfg1; + u32 update_fc_threshold; bool has_pex_clkreq_en; bool has_pex_bias_ctrl; bool has_intr_prsnt_sense; @@ -260,6 +321,24 @@ struct tegra_pcie_soc { bool has_gen2; bool force_pca_enable; bool program_uphy; + bool update_clamp_threshold; + bool program_deskew_time; + bool raw_violation_fixup; + bool update_fc_timer; + bool has_cache_bars; + struct { + struct { + u32 rp_ectl_2_r1; + u32 rp_ectl_4_r1; + u32 rp_ectl_5_r1; + u32 rp_ectl_6_r1; + u32 rp_ectl_2_r2; + u32 rp_ectl_4_r2; + u32 rp_ectl_5_r2; + u32 rp_ectl_6_r2; + } regs; + bool enable; + } ectl; }; static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) @@ -321,6 +400,8 @@ struct tegra_pcie_port { unsigned int lanes; struct phy **phys; + + struct gpio_desc *reset_gpio; }; struct tegra_pcie_bus { @@ -440,6 +521,7 @@ static struct pci_ops tegra_pcie_ops = { static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) { + const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long ret = 0; switch (port->index) { @@ -452,7 +534,7 @@ static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) break; case 2: - ret = AFI_PEX2_CTRL; + ret = soc->afi_pex2_ctrl; break; } @@ -465,15 +547,162 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port) unsigned long value; /* pulse reset signal */ - value = afi_readl(port->pcie, ctrl); - value &= ~AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); + if (port->reset_gpio) { + gpiod_set_value(port->reset_gpio, 1); + } else { + value = afi_readl(port->pcie, ctrl); + value &= ~AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + } usleep_range(1000, 2000); - value = afi_readl(port->pcie, ctrl); - value |= AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); + if (port->reset_gpio) { + gpiod_set_value(port->reset_gpio, 0); + } else { + value = afi_readl(port->pcie, ctrl); + value |= AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + } +} + +static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + /* Enable AER capability */ + value = readl(port->base + RP_VEND_CTL1); + value |= RP_VEND_CTL1_ERPT; + writel(value, port->base + RP_VEND_CTL1); + + /* Optimal settings to enhance bandwidth */ + value = readl(port->base + RP_VEND_XP); + value |= RP_VEND_XP_OPPORTUNISTIC_ACK; + value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC; + writel(value, port->base + RP_VEND_XP); + + /* + * LTSSM will wait for DLLP to finish before entering L1 or L2, + * to avoid truncation of PM messages which results in receiver errors + */ + value = readl(port->base + RP_VEND_XP_BIST); + value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE; + writel(value, port->base + RP_VEND_XP_BIST); + + value = readl(port->base + RP_PRIV_MISC); + value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE; + value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE; + + if (soc->update_clamp_threshold) { + value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK | + RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK); + value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD | + RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD; + } + + writel(value, port->base + RP_PRIV_MISC); +} + +static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + value = readl(port->base 
+ RP_ECTL_2_R1); + value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK; + value |= soc->ectl.regs.rp_ectl_2_r1; + writel(value, port->base + RP_ECTL_2_R1); + + value = readl(port->base + RP_ECTL_4_R1); + value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK; + value |= soc->ectl.regs.rp_ectl_4_r1 << + RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT; + writel(value, port->base + RP_ECTL_4_R1); + + value = readl(port->base + RP_ECTL_5_R1); + value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK; + value |= soc->ectl.regs.rp_ectl_5_r1; + writel(value, port->base + RP_ECTL_5_R1); + + value = readl(port->base + RP_ECTL_6_R1); + value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK; + value |= soc->ectl.regs.rp_ectl_6_r1; + writel(value, port->base + RP_ECTL_6_R1); + + value = readl(port->base + RP_ECTL_2_R2); + value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK; + value |= soc->ectl.regs.rp_ectl_2_r2; + writel(value, port->base + RP_ECTL_2_R2); + + value = readl(port->base + RP_ECTL_4_R2); + value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK; + value |= soc->ectl.regs.rp_ectl_4_r2 << + RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT; + writel(value, port->base + RP_ECTL_4_R2); + + value = readl(port->base + RP_ECTL_5_R2); + value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK; + value |= soc->ectl.regs.rp_ectl_5_r2; + writel(value, port->base + RP_ECTL_5_R2); + + value = readl(port->base + RP_ECTL_6_R2); + value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK; + value |= soc->ectl.regs.rp_ectl_6_r2; + writel(value, port->base + RP_ECTL_6_R2); +} + +static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + /* + * Sometimes link speed change from Gen2 to Gen1 fails due to + * instability in deskew logic on lane-0. Increase the deskew + * retry time to resolve this issue. + */ + if (soc->program_deskew_time) { + value = readl(port->base + RP_VEND_CTL0); + value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK; + value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH; + writel(value, port->base + RP_VEND_CTL0); + } + + /* Fixup for read after write violation. */ + if (soc->raw_violation_fixup) { + value = readl(port->base + RP_RX_HDR_LIMIT); + value &= ~RP_RX_HDR_LIMIT_PW_MASK; + value |= RP_RX_HDR_LIMIT_PW; + writel(value, port->base + RP_RX_HDR_LIMIT); + + value = readl(port->base + RP_PRIV_XP_DL); + value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; + writel(value, port->base + RP_PRIV_XP_DL); + + value = readl(port->base + RP_VEND_XP); + value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; + value |= soc->update_fc_threshold; + writel(value, port->base + RP_VEND_XP); + } + + if (soc->update_fc_timer) { + value = readl(port->base + RP_VEND_XP); + value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; + value |= soc->update_fc_threshold; + writel(value, port->base + RP_VEND_XP); + } + + /* + * PCIe link doesn't come up with few legacy PCIe endpoints if + * root port advertises both Gen-1 and Gen-2 speeds in Tegra. 
+ * Hence, the strategy followed here is to initially advertise + * only Gen-1 and after link is up, retrain link to Gen-2 speed + */ + value = readl(port->base + RP_LINK_CONTROL_STATUS_2); + value &= ~PCI_EXP_LNKSTA_CLS; + value |= PCI_EXP_LNKSTA_CLS_2_5GB; + writel(value, port->base + RP_LINK_CONTROL_STATUS_2); } static void tegra_pcie_port_enable(struct tegra_pcie_port *port) @@ -500,6 +729,13 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port) value |= RP_VEND_CTL2_PCA_ENABLE; writel(value, port->base + RP_VEND_CTL2); } + + tegra_pcie_enable_rp_features(port); + + if (soc->ectl.enable) + tegra_pcie_program_ectl_settings(port); + + tegra_pcie_apply_sw_fixup(port); } static void tegra_pcie_port_disable(struct tegra_pcie_port *port) @@ -521,6 +757,12 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port) value &= ~AFI_PEX_CTRL_REFCLK_EN; afi_writel(port->pcie, value, ctrl); + + /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */ + value = afi_readl(port->pcie, AFI_PCIE_CONFIG); + value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); + value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); + afi_writel(port->pcie, value, AFI_PCIE_CONFIG); } static void tegra_pcie_port_free(struct tegra_pcie_port *port) @@ -545,12 +787,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); -/* Tegra PCIE requires relaxed ordering */ +/* Tegra20 and Tegra30 PCIE requires relaxed ordering */ static void tegra_pcie_relax_enable(struct pci_dev *dev) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); static int tegra_pcie_request_resources(struct tegra_pcie *pcie) { @@ -635,7 +880,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) * do not pollute kernel log with master abort reports since they * happen a lot during enumeration */ - if (code == AFI_INTR_MASTER_ABORT) + if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE) dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); else dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); @@ -704,11 +949,13 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); afi_writel(pcie, 0, AFI_FPCI_BAR5); - /* map all upstream transactions as uncached */ - afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); - afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); + if (pcie->soc->has_cache_bars) { + /* map all upstream transactions as uncached */ + afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); + afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); + } /* MSI translations are setup only when needed */ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST); @@ -852,7 +1099,6 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { struct device *dev 
= pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; int err; @@ -878,12 +1124,6 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) } } - /* Configure the reference clock driver */ - pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); - - if (soc->num_ports > 2) - pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); - return 0; } @@ -918,13 +1158,11 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) return 0; } -static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) +static void tegra_pcie_enable_controller(struct tegra_pcie *pcie) { - struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; unsigned long value; - int err; /* enable PLL power down */ if (pcie->phy) { @@ -942,9 +1180,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) value = afi_readl(pcie, AFI_PCIE_CONFIG); value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; + value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL; - list_for_each_entry(port, &pcie->ports, list) + list_for_each_entry(port, &pcie->ports, list) { value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); + value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); + } afi_writel(pcie, value, AFI_PCIE_CONFIG); @@ -958,20 +1199,10 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) afi_writel(pcie, value, AFI_FUSE); } - if (soc->program_uphy) { - err = tegra_pcie_phy_power_on(pcie); - if (err < 0) { - dev_err(dev, "failed to power on PHY(s): %d\n", err); - return err; - } - } - - /* take the PCIe interface module out of reset */ - reset_control_deassert(pcie->pcie_xrst); - - /* finally enable PCIe */ + /* Disable AFI dynamic clock gating and enable PCIe */ value = afi_readl(pcie, AFI_CONFIGURATION); value |= AFI_CONFIGURATION_EN_FPCI; + value |= AFI_CONFIGURATION_CLKEN_OVERRIDE; afi_writel(pcie, value, AFI_CONFIGURATION); value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | @@ -989,22 +1220,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) /* disable all exceptions */ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); - - return 0; -} - -static void tegra_pcie_disable_controller(struct tegra_pcie *pcie) -{ - int err; - - reset_control_assert(pcie->pcie_xrst); - - if (pcie->soc->program_uphy) { - err = tegra_pcie_phy_power_off(pcie); - if (err < 0) - dev_err(pcie->dev, "failed to power off PHY(s): %d\n", - err); - } } static void tegra_pcie_power_off(struct tegra_pcie *pcie) @@ -1014,13 +1229,11 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) int err; reset_control_assert(pcie->afi_rst); - reset_control_assert(pcie->pex_rst); clk_disable_unprepare(pcie->pll_e); if (soc->has_cml_clk) clk_disable_unprepare(pcie->cml_clk); clk_disable_unprepare(pcie->afi_clk); - clk_disable_unprepare(pcie->pex_clk); if (!dev->pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); @@ -1048,46 +1261,66 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) if (err < 0) dev_err(dev, "failed to enable regulators: %d\n", err); - if (dev->pm_domain) { - err = clk_prepare_enable(pcie->pex_clk); + if (!dev->pm_domain) { + err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE); if (err) { - dev_err(dev, "failed to enable PEX clock: %d\n", err); - return err; + dev_err(dev, "failed to power ungate: %d\n", err); + goto regulator_disable; } - reset_control_deassert(pcie->pex_rst); - } else { - err = 
tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, - pcie->pex_clk, - pcie->pex_rst); + err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE); if (err) { - dev_err(dev, "powerup sequence failed: %d\n", err); - return err; + dev_err(dev, "failed to remove clamp: %d\n", err); + goto powergate; } } - reset_control_deassert(pcie->afi_rst); - err = clk_prepare_enable(pcie->afi_clk); if (err < 0) { dev_err(dev, "failed to enable AFI clock: %d\n", err); - return err; + goto powergate; } if (soc->has_cml_clk) { err = clk_prepare_enable(pcie->cml_clk); if (err < 0) { dev_err(dev, "failed to enable CML clock: %d\n", err); - return err; + goto disable_afi_clk; } } err = clk_prepare_enable(pcie->pll_e); if (err < 0) { dev_err(dev, "failed to enable PLLE clock: %d\n", err); - return err; + goto disable_cml_clk; } + reset_control_deassert(pcie->afi_rst); + return 0; + +disable_cml_clk: + if (soc->has_cml_clk) + clk_disable_unprepare(pcie->cml_clk); +disable_afi_clk: + clk_disable_unprepare(pcie->afi_clk); +powergate: + if (!dev->pm_domain) + tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); +regulator_disable: + regulator_bulk_disable(pcie->num_supplies, pcie->supplies); + + return err; +} + +static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + + /* Configure the reference clock driver */ + pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); + + if (soc->num_ports > 2) + pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); } static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) @@ -1536,7 +1769,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) err = platform_get_irq_byname(pdev, "msi"); if (err < 0) { dev_err(dev, "failed to get IRQ: %d\n", err); - goto err; + goto free_irq_domain; } msi->irq = err; @@ -1545,17 +1778,35 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) tegra_msi_irq_chip.name, pcie); if (err < 0) { dev_err(dev, "failed to request IRQ: %d\n", err); - goto err; + goto free_irq_domain; + } + + /* Though the PCIe controller can address >32-bit address space, to + * facilitate endpoints that support only 32-bit MSI target address, + * the mask is set to 32-bit to make sure that MSI target address is + * always a 32-bit address + */ + err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (err < 0) { + dev_err(dev, "failed to set DMA coherent mask: %d\n", err); + goto free_irq; + } + + msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL, + DMA_ATTR_NO_KERNEL_MAPPING); + if (!msi->virt) { + dev_err(dev, "failed to allocate DMA memory for MSI\n"); + err = -ENOMEM; + goto free_irq; } - /* setup AFI/FPCI range */ - msi->pages = __get_free_pages(GFP_KERNEL, 0); - msi->phys = virt_to_phys((void *)msi->pages); host->msi = &msi->chip; return 0; -err: +free_irq: + free_irq(msi->irq, pcie); +free_irq_domain: irq_domain_remove(msi->domain); return err; } @@ -1592,7 +1843,8 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie) struct tegra_msi *msi = &pcie->msi; unsigned int i, irq; - free_pages(msi->pages, 0); + dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys, + DMA_ATTR_NO_KERNEL_MAPPING); if (msi->irq > 0) free_irq(msi->irq, pcie); @@ -1628,6 +1880,15 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) return 0; } +static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie) +{ + u32 value; + + value = afi_readl(pcie, AFI_INTR_MASK); + value &= ~AFI_INTR_MASK_INT_MASK; + afi_writel(pcie, value, AFI_INTR_MASK); +} + static int 
tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
 {
@@ -1971,18 +2232,20 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		struct tegra_pcie_port *rp;
 		unsigned int index;
 		u32 value;
+		char *label;
 
 		err = of_pci_get_devfn(port);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}
 
 		index = PCI_SLOT(err);
 
 		if (index < 1 || index > soc->num_ports) {
 			dev_err(dev, "invalid port number: %d\n", index);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}
 
 		index--;
@@ -1991,12 +2254,13 @@
 		if (err < 0) {
 			dev_err(dev, "failed to parse # of lanes: %d\n",
 				err);
-			return err;
+			goto err_node_put;
 		}
 
 		if (value > 16) {
 			dev_err(dev, "invalid # of lanes: %u\n", value);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}
 
 		lanes |= value << (index << 3);
@@ -2010,13 +2274,15 @@
 		lane += value;
 
 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
-		if (!rp)
-			return -ENOMEM;
+		if (!rp) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}
 
 		err = of_address_to_resource(port, 0, &rp->regs);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}
 
 		INIT_LIST_HEAD(&rp->list);
@@ -2029,6 +2295,31 @@
 		if (IS_ERR(rp->base))
 			return PTR_ERR(rp->base);
 
+		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+		if (!label) {
+			dev_err(dev, "failed to create reset GPIO label\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * Returns -ENOENT if the reset-gpios property is not
+		 * populated, in which case we fall back to toggling the
+		 * PERST# SFIO line through the per-port AFI register.
+		 */
+		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
+							     "reset-gpios", 0,
+							     GPIOD_OUT_LOW,
+							     label);
+		if (IS_ERR(rp->reset_gpio)) {
+			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+				rp->reset_gpio = NULL;
+			} else {
+				dev_err(dev, "failed to get reset GPIO: %ld\n",
+					PTR_ERR(rp->reset_gpio));
+				return PTR_ERR(rp->reset_gpio);
+			}
+		}
+
 		list_add_tail(&rp->list, &pcie->ports);
 	}
 
@@ -2043,6 +2334,10 @@
 		return err;
 
 	return 0;
+
+err_node_put:
+	of_node_put(port);
+	return err;
 }
 
 /*
@@ -2076,7 +2371,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
 	} while (--timeout);
 
 	if (!timeout) {
-		dev_err(dev, "link %u down, retrying\n", port->index);
+		dev_dbg(dev, "link %u down, retrying\n", port->index);
 		goto retry;
 	}
 
@@ -2098,6 +2393,64 @@ retry:
 	return false;
 }
 
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port;
+	ktime_t deadline;
+	u32 value;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		/*
+		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
+		 * is not supported by Tegra. tegra_pcie_change_link_speed()
+		 * is called only for Tegra chips which support Gen2, so
+		 * there is no harm if the supported link speed is not
+		 * verified.
+		 */
+		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+		value &= ~PCI_EXP_LNKSTA_CLS;
+		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+		/*
+		 * Poll until link comes back from recovery to avoid race
+		 * condition.
+ */ + deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); + + while (ktime_before(ktime_get(), deadline)) { + value = readl(port->base + RP_LINK_CONTROL_STATUS); + if ((value & PCI_EXP_LNKSTA_LT) == 0) + break; + + usleep_range(2000, 3000); + } + + if (value & PCI_EXP_LNKSTA_LT) + dev_warn(dev, "PCIe port %u link is in recovery\n", + port->index); + + /* Retrain the link */ + value = readl(port->base + RP_LINK_CONTROL_STATUS); + value |= PCI_EXP_LNKCTL_RL; + writel(value, port->base + RP_LINK_CONTROL_STATUS); + + deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); + + while (ktime_before(ktime_get(), deadline)) { + value = readl(port->base + RP_LINK_CONTROL_STATUS); + if ((value & PCI_EXP_LNKSTA_LT) == 0) + break; + + usleep_range(2000, 3000); + } + + if (value & PCI_EXP_LNKSTA_LT) + dev_err(dev, "failed to retrain link of port %u\n", + port->index); + } +} + static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; @@ -2108,7 +2461,12 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) port->index, port->lanes); tegra_pcie_port_enable(port); + } + + /* Start LTSSM from Tegra side */ + reset_control_deassert(pcie->pcie_xrst); + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { if (tegra_pcie_port_check_link(port)) continue; @@ -2117,12 +2475,17 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) tegra_pcie_port_disable(port); tegra_pcie_port_free(port); } + + if (pcie->soc->has_gen2) + tegra_pcie_change_link_speed(pcie); } static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) { struct tegra_pcie_port *port, *tmp; + reset_control_assert(pcie->pcie_xrst); + list_for_each_entry_safe(port, tmp, &pcie->ports, list) tegra_pcie_port_disable(port); } @@ -2136,6 +2499,7 @@ static const struct tegra_pcie_soc tegra20_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 0, + .afi_pex2_ctrl = 0x128, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, .pads_refclk_cfg0 = 0xfa5cfa5c, @@ -2146,6 +2510,12 @@ static const struct tegra_pcie_soc tegra20_pcie = { .has_gen2 = false, .force_pca_enable = false, .program_uphy = true, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = true, + .ectl.enable = false, }; static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { @@ -2169,6 +2539,12 @@ static const struct tegra_pcie_soc tegra30_pcie = { .has_gen2 = false, .force_pca_enable = false, .program_uphy = true, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct tegra_pcie_soc tegra124_pcie = { @@ -2178,6 +2554,8 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, + /* FC threshold is bit[25:18] */ + .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2185,6 +2563,12 @@ static const struct tegra_pcie_soc tegra124_pcie = { .has_gen2 = true, .force_pca_enable = false, .program_uphy = true, + .update_clamp_threshold = true, + .program_deskew_time = false, + .raw_violation_fixup = true, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct tegra_pcie_soc tegra210_pcie = { @@ -2194,6 +2578,8 @@ 
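The update_fc_threshold values in these SoC tables are stored pre-shifted
into bits [25:18] of RP_VEND_XP (the field behind
RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK), so tegra_pcie_apply_sw_fixup() ORs
them in without any shift at write time. Decoded, the two raw values used
here are:

	0x03fc0000 >> 18 == 0xff	/* Tegra124: maximum threshold */
	0x01800000 >> 18 == 0x60	/* Tegra210 */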
static const struct tegra_pcie_soc tegra210_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x90b890b8, + /* FC threshold is bit[25:18] */ + .update_fc_threshold = 0x01800000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2201,6 +2587,24 @@ static const struct tegra_pcie_soc tegra210_pcie = { .has_gen2 = true, .force_pca_enable = true, .program_uphy = true, + .update_clamp_threshold = true, + .program_deskew_time = true, + .raw_violation_fixup = false, + .update_fc_timer = true, + .has_cache_bars = false, + .ectl = { + .regs = { + .rp_ectl_2_r1 = 0x0000000f, + .rp_ectl_4_r1 = 0x00000067, + .rp_ectl_5_r1 = 0x55010000, + .rp_ectl_6_r1 = 0x00000001, + .rp_ectl_2_r2 = 0x0000008f, + .rp_ectl_4_r2 = 0x000000c7, + .rp_ectl_5_r2 = 0x55010000, + .rp_ectl_6_r2 = 0x00000001, + }, + .enable = true, + }, }; static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { @@ -2213,6 +2617,7 @@ static const struct tegra_pcie_soc tegra186_pcie = { .num_ports = 3, .ports = tegra186_pcie_ports, .msi_base_shift = 8, + .afi_pex2_ctrl = 0x19c, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x80b880b8, @@ -2224,6 +2629,12 @@ static const struct tegra_pcie_soc tegra186_pcie = { .has_gen2 = true, .force_pca_enable = false, .program_uphy = false, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct of_device_id tegra_pcie_of_match[] = { @@ -2466,16 +2877,32 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); struct tegra_pcie_port *port; + int err; list_for_each_entry(port, &pcie->ports, list) tegra_pcie_pme_turnoff(port); tegra_pcie_disable_ports(pcie); + /* + * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to + * avoid unwanted interrupts raised by AFI after pex_rst is asserted. 
+ */ + tegra_pcie_disable_interrupts(pcie); + + if (pcie->soc->program_uphy) { + err = tegra_pcie_phy_power_off(pcie); + if (err < 0) + dev_err(dev, "failed to power off PHY(s): %d\n", err); + } + + reset_control_assert(pcie->pex_rst); + clk_disable_unprepare(pcie->pex_clk); + if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_disable_msi(pcie); - tegra_pcie_disable_controller(pcie); + pinctrl_pm_select_idle_state(dev); tegra_pcie_power_off(pcie); return 0; @@ -2491,20 +2918,45 @@ static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) dev_err(dev, "tegra pcie power on fail: %d\n", err); return err; } - err = tegra_pcie_enable_controller(pcie); - if (err) { - dev_err(dev, "tegra pcie controller enable fail: %d\n", err); + + err = pinctrl_pm_select_default_state(dev); + if (err < 0) { + dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err); goto poweroff; } + + tegra_pcie_enable_controller(pcie); tegra_pcie_setup_translations(pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi(pcie); + err = clk_prepare_enable(pcie->pex_clk); + if (err) { + dev_err(dev, "failed to enable PEX clock: %d\n", err); + goto pex_dpd_enable; + } + + reset_control_deassert(pcie->pex_rst); + + if (pcie->soc->program_uphy) { + err = tegra_pcie_phy_power_on(pcie); + if (err < 0) { + dev_err(dev, "failed to power on PHY(s): %d\n", err); + goto disable_pex_clk; + } + } + + tegra_pcie_apply_pad_settings(pcie); tegra_pcie_enable_ports(pcie); return 0; +disable_pex_clk: + reset_control_assert(pcie->pex_rst); + clk_disable_unprepare(pcie->pex_clk); +pex_dpd_enable: + pinctrl_pm_select_idle_state(dev); poweroff: tegra_pcie_power_off(pcie); diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 025ef7d9a046..16d938920ca5 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -288,4 +289,13 @@ static int __init altera_msi_init(void) { return platform_driver_register(&altera_msi_driver); } + +static void __exit altera_msi_exit(void) +{ + platform_driver_unregister(&altera_msi_driver); +} + subsys_initcall(altera_msi_init); +MODULE_DEVICE_TABLE(of, altera_msi_of_match); +module_exit(altera_msi_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 27edcebd1726..d2497ca43828 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> @@ -43,6 +44,8 @@ #define S10_RP_RXCPL_STATUS 0x200C #define S10_RP_CFG_ADDR(pcie, reg) \ (((pcie)->hip_base) + (reg) + (1 << 20)) +#define S10_RP_SECONDARY(pcie) \ + readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) /* TLP configuration type 0 and 1 */ #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ @@ -54,14 +57,9 @@ #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) -#define TLP_CFGRD_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \ - : pcie->pcie_data->cfgrd1) << 24) | \ - TLP_PAYLOAD_SIZE) -#define TLP_CFGWR_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? 
pcie->pcie_data->cfgwr0 \ - : pcie->pcie_data->cfgwr1) << 24) | \ - TLP_PAYLOAD_SIZE) +#define TLP_CFG_DW0(pcie, cfg) \ + (((cfg) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ @@ -321,14 +319,31 @@ static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers, s10_tlp_write_tx(pcie, data, RP_TX_EOP); } +static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, bool read, u32 *headers) +{ + u8 cfg; + u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0; + u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1; + u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG; + + if (pcie->pcie_data->version == ALTERA_PCIE_V1) + cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1; + else + cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1; + + headers[0] = TLP_CFG_DW0(pcie, cfg); + headers[1] = TLP_CFG_DW1(pcie, tag, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); +} + static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, u32 *value) { u32 headers[TLP_HDR_SIZE]; - headers[0] = TLP_CFGRD_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); + get_tlp_header(pcie, bus, devfn, where, byte_en, true, + headers); pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false); @@ -341,9 +356,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, u32 headers[TLP_HDR_SIZE]; int ret; - headers[0] = TLP_CFGWR_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); + get_tlp_header(pcie, bus, devfn, where, byte_en, false, + headers); /* check alignment to Qword */ if ((where & 0x7) == 0) @@ -705,6 +719,13 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) return 0; } +static void altera_pcie_irq_teardown(struct altera_pcie *pcie) +{ + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); + irq_domain_remove(pcie->irq_domain); + irq_dispose_mapping(pcie->irq); +} + static int altera_pcie_parse_dt(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; @@ -798,6 +819,7 @@ static int altera_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); match = of_match_device(altera_pcie_of_match, &pdev->dev); if (!match) @@ -855,13 +877,28 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } +static int altera_pcie_remove(struct platform_device *pdev) +{ + struct altera_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + pci_free_resource_list(&pcie->resources); + altera_pcie_irq_teardown(pcie); + + return 0; +} + static struct platform_driver altera_pcie_driver = { .probe = altera_pcie_probe, + .remove = altera_pcie_remove, .driver = { .name = "altera-pcie", .of_match_table = altera_pcie_of_match, - .suppress_bind_attrs = true, }, }; -builtin_platform_driver(altera_pcie_driver); +MODULE_DEVICE_TABLE(of, altera_pcie_of_match); +module_platform_driver(altera_pcie_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index cb3401a931f8..0a3f61be5625 100644 --- 
a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -367,7 +367,7 @@ static void iproc_msi_handler(struct irq_desc *desc) /* * Now go read the tail pointer again to see if there are new - * oustanding events that came in during the above window. + * outstanding events that came in during the above window. */ } while (true); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index f30f5f3fb5c1..9ee6200a66f4 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -87,18 +87,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) /* * DT nodes are not used by all platforms that use the iProc PCIe - * core driver. For platforms that require explict inbound mapping + * core driver. For platforms that require explicit inbound mapping * configuration, "dma-ranges" would have been present in DT */ pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); /* PHY use is optional */ - pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(pcie->phy)) { - if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) - return -EPROBE_DEFER; - pcie->phy = NULL; - } + pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, &iobase); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index c20fd6bd68fd..2d457bfdaf66 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -60,6 +60,10 @@ #define APB_ERR_EN_SHIFT 0 #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) +#define CFG_RD_SUCCESS 0 +#define CFG_RD_UR 1 +#define CFG_RD_CRS 2 +#define CFG_RD_CA 3 #define CFG_RETRY_STATUS 0xffff0001 #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ @@ -159,7 +163,7 @@ enum iproc_pcie_ib_map_type { * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or * SZ_1G * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or - * GB, depedning on the size unit + * GB, depending on the size unit * @nr_sizes: number of supported inbound mapping region sizes * @nr_windows: number of supported inbound mapping windows for the region * @imap_addr_offset: register offset between the upper and lower 32-bit @@ -289,6 +293,9 @@ enum iproc_pcie_reg { IPROC_PCIE_IARR4, IPROC_PCIE_IMAP4, + /* config read status */ + IPROC_PCIE_CFG_RD_STATUS, + /* link status */ IPROC_PCIE_LINK_STATUS, @@ -350,6 +357,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = { [IPROC_PCIE_IMAP3] = 0xe08, [IPROC_PCIE_IARR4] = 0xe68, [IPROC_PCIE_IMAP4] = 0xe70, + [IPROC_PCIE_CFG_RD_STATUS] = 0xee0, [IPROC_PCIE_LINK_STATUS] = 0xf0c, [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; @@ -474,10 +482,12 @@ static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, return (pcie->base + offset); } -static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) +static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie, + void __iomem *cfg_data_p) { int timeout = CFG_RETRY_STATUS_TIMEOUT_US; unsigned int data; + u32 status; /* * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only @@ -498,6 +508,15 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) */ data = readl(cfg_data_p); while (data == CFG_RETRY_STATUS && timeout--) { + /* + * CRS state is set in CFG_RD status register + * This will handle the case where CFG_RETRY_STATUS is + * valid config data. 
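+		 *
+		 * (A CRS completion is synthesized by the controller as the
+		 * fixed value 0xffff0001, which a device may also return as
+		 * perfectly valid read data, so the status register is the
+		 * only reliable way to tell the two apart:
+		 *
+		 *	data == CFG_RETRY_STATUS &&
+		 *	iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS)
+		 *		== CFG_RD_CRS
+		 *
+		 * means the host is still retrying; any other status means
+		 * data is real.)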
+ */ + status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS); + if (status != CFG_RD_CRS) + return data; + udelay(1); data = readl(cfg_data_p); } @@ -576,7 +595,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, if (!cfg_data_p) return PCIBIOS_DEVICE_NOT_FOUND; - data = iproc_pcie_cfg_retry(cfg_data_p); + data = iproc_pcie_cfg_retry(pcie, cfg_data_p); *val = data; if (size <= 2) @@ -936,8 +955,25 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, resource_size_t window_size = ob_map->window_sizes[size_idx] * SZ_1M; - if (size < window_size) - continue; + /* + * Keep iterating until we reach the last window and + * with the minimal window size at index zero. In this + * case, we take a compromise by mapping it using the + * minimum window size that can be supported + */ + if (size < window_size) { + if (size_idx > 0 || window_idx > 0) + continue; + + /* + * For the corner case of reaching the minimal + * window size that can be supported on the + * last window + */ + axi_addr = ALIGN_DOWN(axi_addr, window_size); + pci_addr = ALIGN_DOWN(pci_addr, window_size); + size = window_size; + } if (!IS_ALIGNED(axi_addr, window_size) || !IS_ALIGNED(pci_addr, window_size)) { @@ -1146,11 +1182,43 @@ err_ib: return ret; } +static int iproc_pcie_add_dma_range(struct device *dev, + struct list_head *resources, + struct of_pci_range *range) +{ + struct resource *res; + struct resource_entry *entry, *tmp; + struct list_head *head = resources; + + res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL); + if (!res) + return -ENOMEM; + + resource_list_for_each_entry(tmp, resources) { + if (tmp->res->start < range->cpu_addr) + head = &tmp->node; + } + + res->start = range->cpu_addr; + res->end = res->start + range->size - 1; + + entry = resource_list_create_entry(res, 0); + if (!entry) + return -ENOMEM; + + entry->offset = res->start - range->cpu_addr; + resource_list_add(entry, head); + + return 0; +} + static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) { + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct of_pci_range range; struct of_pci_range_parser parser; int ret; + LIST_HEAD(resources); /* Get the dma-ranges from DT */ ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); @@ -1158,13 +1226,23 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) return ret; for_each_of_pci_range(&parser, &range) { + ret = iproc_pcie_add_dma_range(pcie->dev, + &resources, + &range); + if (ret) + goto out; /* Each range entry corresponds to an inbound mapping region */ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); if (ret) - return ret; + goto out; } + list_splice_init(&resources, &host->dma_ranges); + return 0; +out: + pci_free_resource_list(&resources); + return ret; } static int iproce_pcie_get_msi(struct iproc_pcie *pcie, @@ -1320,14 +1398,18 @@ static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) if (pcie->need_msi_steer) { ret = iproc_pcie_msi_steer(pcie, msi_node); if (ret) - return ret; + goto out_put_node; } /* * If another MSI controller is being used, the call below should fail * but that is okay */ - return iproc_msi_init(pcie, msi_node); + ret = iproc_msi_init(pcie, msi_node); + +out_put_node: + of_node_put(msi_node); + return ret; } static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) @@ -1347,7 +1429,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) break; case IPROC_PCIE_PAXB: regs = iproc_pcie_reg_paxb; - pcie->iproc_cfg_read = true; 
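The minimal-window fallback added to iproc_pcie_setup_ob() above is
easiest to see with hypothetical numbers: a 1 MiB outbound range at AXI
address 0x42d00000, on hardware whose smallest supported window is 2 MiB,
is now widened instead of failing outright:

	axi_addr = ALIGN_DOWN(0x42d00000, SZ_2M);	/* -> 0x42c00000 */
	pci_addr = ALIGN_DOWN(pci_addr, SZ_2M);
	size = SZ_2M;					/* was SZ_1M */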
pcie->has_apb_err_disable = true; if (pcie->need_ob_cfg) { pcie->ob_map = paxb_ob_map; @@ -1356,6 +1437,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) break; case IPROC_PCIE_PAXB_V2: regs = iproc_pcie_reg_paxb_v2; + pcie->iproc_cfg_read = true; pcie->has_apb_err_disable = true; if (pcie->need_ob_cfg) { pcie->ob_map = paxb_v2_ob_map; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 0b6c72804e03..626a7c352dfd 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -73,6 +73,7 @@ #define PCIE_MSI_VECTOR 0x0c0 #define PCIE_CONF_VEND_ID 0x100 +#define PCIE_CONF_DEVICE_ID 0x102 #define PCIE_CONF_CLASS_ID 0x106 #define PCIE_INT_MASK 0x420 @@ -141,12 +142,16 @@ struct mtk_pcie_port; /** * struct mtk_pcie_soc - differentiate between host generations * @need_fix_class_id: whether this host's class ID needed to be fixed or not + * @need_fix_device_id: whether this host's device ID needed to be fixed or not + * @device_id: device ID which this host need to be fixed * @ops: pointer to configuration access functions * @startup: pointer to controller setting functions * @setup_irq: pointer to initialize IRQ functions */ struct mtk_pcie_soc { bool need_fix_class_id; + bool need_fix_device_id; + unsigned int device_id; struct pci_ops *ops; int (*startup)(struct mtk_pcie_port *port); int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); @@ -578,6 +583,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); + of_node_put(pcie_intc_node); if (!port->irq_domain) { dev_err(dev, "failed to get INTx IRQ domain\n"); return -ENODEV; @@ -629,8 +635,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc) } chained_irq_exit(irqchip, desc); - - return; } static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, @@ -695,6 +699,9 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) writew(val, port->base + PCIE_CONF_CLASS_ID); } + if (soc->need_fix_device_id) + writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID); + /* 100ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, !!(val & PCIE_PORT_LINKUP_V2), 20, @@ -915,49 +922,29 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, /* sys_ck might be divided into the following parts in some chips */ snprintf(name, sizeof(name), "ahb_ck%d", slot); - port->ahb_ck = devm_clk_get(dev, name); - if (IS_ERR(port->ahb_ck)) { - if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->ahb_ck = NULL; - } + port->ahb_ck = devm_clk_get_optional(dev, name); + if (IS_ERR(port->ahb_ck)) + return PTR_ERR(port->ahb_ck); snprintf(name, sizeof(name), "axi_ck%d", slot); - port->axi_ck = devm_clk_get(dev, name); - if (IS_ERR(port->axi_ck)) { - if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->axi_ck = NULL; - } + port->axi_ck = devm_clk_get_optional(dev, name); + if (IS_ERR(port->axi_ck)) + return PTR_ERR(port->axi_ck); snprintf(name, sizeof(name), "aux_ck%d", slot); - port->aux_ck = devm_clk_get(dev, name); - if (IS_ERR(port->aux_ck)) { - if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->aux_ck = NULL; - } + port->aux_ck = devm_clk_get_optional(dev, name); + if (IS_ERR(port->aux_ck)) + return PTR_ERR(port->aux_ck); snprintf(name, sizeof(name), "obff_ck%d", slot); - port->obff_ck = 
devm_clk_get(dev, name); - if (IS_ERR(port->obff_ck)) { - if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->obff_ck = NULL; - } + port->obff_ck = devm_clk_get_optional(dev, name); + if (IS_ERR(port->obff_ck)) + return PTR_ERR(port->obff_ck); snprintf(name, sizeof(name), "pipe_ck%d", slot); - port->pipe_ck = devm_clk_get(dev, name); - if (IS_ERR(port->pipe_ck)) { - if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->pipe_ck = NULL; - } + port->pipe_ck = devm_clk_get_optional(dev, name); + if (IS_ERR(port->pipe_ck)) + return PTR_ERR(port->pipe_ck); snprintf(name, sizeof(name), "pcie-rst%d", slot); port->reset = devm_reset_control_get_optional_exclusive(dev, name); @@ -1235,11 +1222,21 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { .setup_irq = mtk_pcie_setup_irq, }; +static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = { + .need_fix_class_id = true, + .need_fix_device_id = true, + .device_id = PCI_DEVICE_ID_MEDIATEK_7629, + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_v2, + .setup_irq = mtk_pcie_setup_irq, +}; + static const struct of_device_id mtk_pcie_ids[] = { { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, + { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 }, {}, }; diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 77052a0712d0..a45a6447b01d 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -31,91 +31,103 @@ * translation tables are grouped into windows, each window registers are * grouped into blocks of 4 or 16 registers each */ -#define PAB_REG_BLOCK_SIZE 16 -#define PAB_EXT_REG_BLOCK_SIZE 4 +#define PAB_REG_BLOCK_SIZE 16 +#define PAB_EXT_REG_BLOCK_SIZE 4 -#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE)) -#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) +#define PAB_REG_ADDR(offset, win) \ + (offset + (win * PAB_REG_BLOCK_SIZE)) +#define PAB_EXT_REG_ADDR(offset, win) \ + (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) -#define LTSSM_STATUS 0x0404 -#define LTSSM_STATUS_L0_MASK 0x3f -#define LTSSM_STATUS_L0 0x2d +#define LTSSM_STATUS 0x0404 +#define LTSSM_STATUS_L0_MASK 0x3f +#define LTSSM_STATUS_L0 0x2d -#define PAB_CTRL 0x0808 -#define AMBA_PIO_ENABLE_SHIFT 0 -#define PEX_PIO_ENABLE_SHIFT 1 -#define PAGE_SEL_SHIFT 13 -#define PAGE_SEL_MASK 0x3f -#define PAGE_LO_MASK 0x3ff -#define PAGE_SEL_EN 0xc00 -#define PAGE_SEL_OFFSET_SHIFT 10 +#define PAB_CTRL 0x0808 +#define AMBA_PIO_ENABLE_SHIFT 0 +#define PEX_PIO_ENABLE_SHIFT 1 +#define PAGE_SEL_SHIFT 13 +#define PAGE_SEL_MASK 0x3f +#define PAGE_LO_MASK 0x3ff +#define PAGE_SEL_OFFSET_SHIFT 10 -#define PAB_AXI_PIO_CTRL 0x0840 -#define APIO_EN_MASK 0xf +#define PAB_AXI_PIO_CTRL 0x0840 +#define APIO_EN_MASK 0xf -#define PAB_PEX_PIO_CTRL 0x08c0 -#define PIO_ENABLE_SHIFT 0 +#define PAB_PEX_PIO_CTRL 0x08c0 +#define PIO_ENABLE_SHIFT 0 #define PAB_INTP_AMBA_MISC_ENB 0x0b0c -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c #define PAB_INTP_INTX_MASK 0x01e0 #define PAB_INTP_MSI_MASK 0x8 -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) -#define WIN_ENABLE_SHIFT 0 -#define WIN_TYPE_SHIFT 1 +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, 
win) +#define WIN_ENABLE_SHIFT 0 +#define WIN_TYPE_SHIFT 1 +#define WIN_TYPE_MASK 0x3 +#define WIN_SIZE_MASK 0xfffffc00 #define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win) #define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) #define AXI_WINDOW_ALIGN_MASK 3 #define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) -#define PAB_BUS_SHIFT 24 -#define PAB_DEVICE_SHIFT 19 -#define PAB_FUNCTION_SHIFT 16 +#define PAB_BUS_SHIFT 24 +#define PAB_DEVICE_SHIFT 19 +#define PAB_FUNCTION_SHIFT 16 #define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) #define PAB_INTP_AXI_PIO_CLASS 0x474 -#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) -#define AMAP_CTRL_EN_SHIFT 0 -#define AMAP_CTRL_TYPE_SHIFT 1 +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) +#define AMAP_CTRL_EN_SHIFT 0 +#define AMAP_CTRL_TYPE_SHIFT 1 +#define AMAP_CTRL_TYPE_MASK 3 #define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win) #define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) /* starting offset of INTX bits in status register */ -#define PAB_INTX_START 5 +#define PAB_INTX_START 5 /* supported number of MSI interrupts */ -#define PCI_NUM_MSI 16 +#define PCI_NUM_MSI 16 /* MSI registers */ -#define MSI_BASE_LO_OFFSET 0x04 -#define MSI_BASE_HI_OFFSET 0x08 -#define MSI_SIZE_OFFSET 0x0c -#define MSI_ENABLE_OFFSET 0x14 -#define MSI_STATUS_OFFSET 0x18 -#define MSI_DATA_OFFSET 0x20 -#define MSI_ADDR_L_OFFSET 0x24 -#define MSI_ADDR_H_OFFSET 0x28 +#define MSI_BASE_LO_OFFSET 0x04 +#define MSI_BASE_HI_OFFSET 0x08 +#define MSI_SIZE_OFFSET 0x0c +#define MSI_ENABLE_OFFSET 0x14 +#define MSI_STATUS_OFFSET 0x18 +#define MSI_DATA_OFFSET 0x20 +#define MSI_ADDR_L_OFFSET 0x24 +#define MSI_ADDR_H_OFFSET 0x28 /* outbound and inbound window definitions */ -#define WIN_NUM_0 0 -#define WIN_NUM_1 1 -#define CFG_WINDOW_TYPE 0 -#define IO_WINDOW_TYPE 1 -#define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) -#define MAX_PIO_WINDOWS 8 +#define WIN_NUM_0 0 +#define WIN_NUM_1 1 +#define CFG_WINDOW_TYPE 0 +#define IO_WINDOW_TYPE 1 +#define MEM_WINDOW_TYPE 2 +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) +#define MAX_PIO_WINDOWS 8 /* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_MIN 90000 -#define LINK_WAIT_MAX 100000 +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_MIN 90000 +#define LINK_WAIT_MAX 100000 + +#define PAGED_ADDR_BNDRY 0xc00 +#define OFFSET_TO_PAGE_ADDR(off) \ + ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY) +#define OFFSET_TO_PAGE_IDX(off) \ + ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK) struct mobiveil_msi { /* MSI information */ struct mutex lock; /* protect bitmap variable */ @@ -145,15 +157,119 @@ struct mobiveil_pcie { struct mobiveil_msi msi; }; -static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value, - const u32 reg) +/* + * mobiveil_pcie_sel_page - routine to access paged register + * + * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged, + * for this scheme to work extracted higher 6 bits of the offset will be + * written to pg_sel field of PAB_CTRL register and rest of the lower 10 + * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register. 
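To make the page-select arithmetic above concrete, here is a stand-alone user-space rendering of the two macros with the masks from this file (PAGE_LO_MASK = 0x3ff, PAGE_SEL_MASK = 0x3f, PAGE_SEL_OFFSET_SHIFT = 10) inlined; for the paged register PAB_PEX_AMAP_CTRL(0) at offset 0x4ba0 it selects page 0x12 and in-page address 0xfa0:

#include <stdio.h>

#define PAGED_ADDR_BNDRY		0xc00
#define OFFSET_TO_PAGE_ADDR(off)	(((off) & 0x3ff) | PAGED_ADDR_BNDRY)
#define OFFSET_TO_PAGE_IDX(off)		(((off) >> 10) & 0x3f)

int main(void)
{
	unsigned int off = 0x4ba0;	/* PAB_PEX_AMAP_CTRL(0) */

	/* Prints: page 0x12, in-page address 0xfa0 */
	printf("page 0x%x, in-page address 0x%x\n",
	       OFFSET_TO_PAGE_IDX(off), OFFSET_TO_PAGE_ADDR(off));
	return 0;
}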
+ */ +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx) +{ + u32 val; + + val = readl(pcie->csr_axi_slave_base + PAB_CTRL); + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT); + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT; + + writel(val, pcie->csr_axi_slave_base + PAB_CTRL); +} + +static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off) +{ + if (off < PAGED_ADDR_BNDRY) { + /* For directly accessed registers, clear the pg_sel field */ + mobiveil_pcie_sel_page(pcie, 0); + return pcie->csr_axi_slave_base + off; + } + + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off)); + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off); +} + +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val) { - writel_relaxed(value, pcie->csr_axi_slave_base + reg); + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + switch (size) { + case 4: + *val = readl(addr); + break; + case 2: + *val = readw(addr); + break; + case 1: + *val = readb(addr); + break; + default: + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; } -static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg) +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) { - return readl_relaxed(pcie->csr_axi_slave_base + reg); + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 4: + writel(val, addr); + break; + case 2: + writew(val, addr); + break; + case 1: + writeb(val, addr); + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) +{ + void *addr; + u32 val; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_read(addr, size, &val); + if (ret) + dev_err(&pcie->pdev->dev, "read CSR address failed\n"); + + return val; +} + +static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size) +{ + void *addr; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_write(addr, size, val); + if (ret) + dev_err(&pcie->pdev->dev, "write CSR address failed\n"); +} + +static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off) +{ + return csr_read(pcie, off, 0x4); +} + +static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off) +{ + csr_write(pcie, val, off, 0x4); } static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) @@ -174,7 +290,7 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) * Do not read more than one device on the bus directly * attached to RC */ - if ((bus->primary == pcie->root_bus_nr) && (devfn > 0)) + if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0)) return false; return true; @@ -185,17 +301,17 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) * root port or endpoint */ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) + unsigned int devfn, int where) { struct mobiveil_pcie *pcie = bus->sysdata; + u32 value; if (!mobiveil_pcie_valid_device(bus, devfn)) return NULL; - if (bus->number == pcie->root_bus_nr) { - /* RC config access */ + /* RC config access */ + if (bus->number == pcie->root_bus_nr) return pcie->csr_axi_slave_base + where; - } /* * EP config access (in Config/APIO space) @@ -203,10 +319,12 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 
Register. * Relies on pci_lock serialization */ - csr_writel(pcie, bus->number << PAB_BUS_SHIFT | - PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | - PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT, - PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + value = bus->number << PAB_BUS_SHIFT | + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT; + + csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + return pcie->config_axi_slave_base + where; } @@ -241,24 +359,29 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) /* Handle INTx */ if (intr_status & PAB_INTP_INTX_MASK) { - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >> - PAB_INTX_START; + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); + shifted_status &= PAB_INTP_INTX_MASK; + shifted_status >>= PAB_INTX_START; do { for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { virq = irq_find_mapping(pcie->intx_domain, - bit + 1); + bit + 1); if (virq) generic_handle_irq(virq); else - dev_err_ratelimited(dev, - "unexpected IRQ, INT%d\n", bit); + dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", + bit); - /* clear interrupt */ - csr_writel(pcie, - shifted_status << PAB_INTX_START, - PAB_INTP_AMBA_MISC_STAT); + /* clear interrupt handled */ + csr_writel(pcie, 1 << (PAB_INTX_START + bit), + PAB_INTP_AMBA_MISC_STAT); } - } while ((shifted_status >> PAB_INTX_START) != 0); + + shifted_status = csr_readl(pcie, + PAB_INTP_AMBA_MISC_STAT); + shifted_status &= PAB_INTP_INTX_MASK; + shifted_status >>= PAB_INTX_START; + } while (shifted_status != 0); } /* read extra MSI status register */ @@ -266,8 +389,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) /* handle MSI interrupts */ while (msi_status & 1) { - msi_data = readl_relaxed(pcie->apb_csr_base - + MSI_DATA_OFFSET); + msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET); /* * MSI_STATUS_OFFSET register gets updated to zero @@ -276,18 +398,18 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) * two dummy reads. 
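In outline, the MSI loop above drains a hardware FIFO: reading MSI_DATA_OFFSET pops the data word, the two address reads complete the pop (hence the "dummy reads" note), and the status register is re-polled until idle. A condensed sketch, assuming apb points at the APB CSR block, dev_domain is the MSI IRQ domain, and the register offsets are those defined earlier in this file:

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static void drain_msi_fifo(void __iomem *apb, struct irq_domain *dev_domain)
{
	u32 status = readl_relaxed(apb + MSI_STATUS_OFFSET);

	while (status & 1) {
		u32 data = readl_relaxed(apb + MSI_DATA_OFFSET);
		unsigned int virq;

		/* Dummy reads: popping the address words advances the FIFO. */
		readl_relaxed(apb + MSI_ADDR_L_OFFSET);
		readl_relaxed(apb + MSI_ADDR_H_OFFSET);

		virq = irq_find_mapping(dev_domain, data);
		if (virq)
			generic_handle_irq(virq);

		status = readl_relaxed(apb + MSI_STATUS_OFFSET);
	}
}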
*/ msi_addr_lo = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_L_OFFSET); + MSI_ADDR_L_OFFSET); msi_addr_hi = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_H_OFFSET); + MSI_ADDR_H_OFFSET); dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", - msi_data, msi_addr_hi, msi_addr_lo); + msi_data, msi_addr_hi, msi_addr_lo); virq = irq_find_mapping(msi->dev_domain, msi_data); if (virq) generic_handle_irq(virq); msi_status = readl_relaxed(pcie->apb_csr_base + - MSI_STATUS_OFFSET); + MSI_STATUS_OFFSET); } /* Clear the interrupt status */ @@ -304,7 +426,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) /* map config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "config_axi_slave"); + "config_axi_slave"); pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->config_axi_slave_base)) return PTR_ERR(pcie->config_axi_slave_base); @@ -312,7 +434,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) /* map csr resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "csr_axi_slave"); + "csr_axi_slave"); pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->csr_axi_slave_base)) return PTR_ERR(pcie->csr_axi_slave_base); @@ -337,92 +459,53 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) return -ENODEV; } - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); - return 0; } -/* - * select_paged_register - routine to access paged register of root complex - * - * registers of RC are paged, for this scheme to work - * extracted higher 6 bits of the offset will be written to pg_sel - * field of PAB_CTRL register and rest of the lower 10 bits enabled with - * PAGE_SEL_EN are used as offset of the register. 
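The new csr_read()/csr_write() helpers shown earlier funnel every CSR access through a single alignment-checked dispatcher, which is what lets the open-coded paging helpers below be deleted. The read side in isolation mirrors mobiveil_pcie_read() above:

#include <linux/io.h>
#include <linux/pci.h>

/* Size-dispatched MMIO read with PCIBIOS-style error reporting. */
static int mmio_read(void __iomem *addr, int size, u32 *val)
{
	/* Reject accesses that are not naturally aligned. */
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	switch (size) {
	case 4: *val = readl(addr); break;
	case 2: *val = readw(addr); break;
	case 1: *val = readb(addr); break;
	default:
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}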
- */ -static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - int pab_ctrl_dw, pg_sel; - - /* clear pg_sel field */ - pab_ctrl_dw = csr_readl(pcie, PAB_CTRL); - pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)); - - /* set pg_sel field */ - pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; - pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT)); - csr_writel(pcie, pab_ctrl_dw, PAB_CTRL); -} - -static void write_paged_register(struct mobiveil_pcie *pcie, - u32 val, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - csr_writel(pcie, val, off); -} - -static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - return csr_readl(pcie, off); -} - static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, - int pci_addr, u32 type, u64 size) + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { - int pio_ctrl_val; - int amap_ctrl_dw; + u32 value; u64 size64 = ~(size - 1); - if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) { + if (win_num >= pcie->ppio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max inbound windows reached !\n"); return; } - pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL); - csr_writel(pcie, - pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL); - amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num)); - amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT)); - amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT)); + value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); + value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); + + csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); - write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64), - PAB_PEX_AMAP_CTRL(win_num)); + csr_writel(pcie, lower_32_bits(cpu_addr), + PAB_PEX_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_PEX_AMAP_SIZEN(win_num)); + csr_writel(pcie, lower_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_L(win_num)); + csr_writel(pcie, upper_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_H(win_num)); - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num)); - write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num)); + pcie->ib_wins_configured++; } /* * routine to program the outbound windows */ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, - u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size) + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { - - u32 value, type; + u32 value; u64 size64 = ~(size - 1); - if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) { + if (win_num >= pcie->apio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max outbound windows reached !\n"); return; @@ -432,28 +515,27 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit * to 4 KB in PAB_AXI_AMAP_CTRL register */ - type = config_io_bit; value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); - csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | - 
lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num)); + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); + value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_AXI_AMAP_SIZE(win_num)); + csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num)); /* * program AXI window base with appropriate value in * PAB_AXI_AMAP_AXI_WIN0 register */ - value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK), - PAB_AXI_AMAP_AXI_WIN(win_num)); - - value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num)); + csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), + PAB_AXI_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); csr_writel(pcie, lower_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_L(win_num)); + PAB_AXI_AMAP_PEX_WIN_L(win_num)); csr_writel(pcie, upper_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_H(win_num)); + PAB_AXI_AMAP_PEX_WIN_H(win_num)); pcie->ob_wins_configured++; } @@ -469,7 +551,9 @@ static int mobiveil_bringup_link(struct mobiveil_pcie *pcie) usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); } + dev_err(&pcie->pdev->dev, "link never came up\n"); + return -ETIMEDOUT; } @@ -482,50 +566,55 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) msi->msi_pages_phys = (phys_addr_t)msg_addr; writel_relaxed(lower_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_LO_OFFSET); + pcie->apb_csr_base + MSI_BASE_LO_OFFSET); writel_relaxed(upper_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_HI_OFFSET); + pcie->apb_csr_base + MSI_BASE_HI_OFFSET); writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); } static int mobiveil_host_init(struct mobiveil_pcie *pcie) { - u32 value, pab_ctrl, type = 0; - int err; - struct resource_entry *win, *tmp; - - err = mobiveil_bringup_link(pcie); - if (err) { - dev_info(&pcie->pdev->dev, "link bring-up failed\n"); - return err; - } + u32 value, pab_ctrl, type; + struct resource_entry *win; + + /* setup bus numbers */ + value = csr_readl(pcie, PCI_PRIMARY_BUS); + value &= 0xff000000; + value |= 0x00ff0100; + csr_writel(pcie, value, PCI_PRIMARY_BUS); /* * program Bus Master Enable Bit in Command Register in PAB Config * Space */ value = csr_readl(pcie, PCI_COMMAND); - csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER, PCI_COMMAND); + value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + csr_writel(pcie, value, PCI_COMMAND); /* * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL * register */ pab_ctrl = csr_readl(pcie, PAB_CTRL); - csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) | - (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL); + pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT); + csr_writel(pcie, pab_ctrl, PAB_CTRL); csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), - PAB_INTP_AMBA_MISC_ENB); + PAB_INTP_AMBA_MISC_ENB); /* * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in * PAB_AXI_PIO_CTRL Register */ value = csr_readl(pcie, PAB_AXI_PIO_CTRL); - csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL); + value |= APIO_EN_MASK; + csr_writel(pcie, value, PAB_AXI_PIO_CTRL); + + /* Enable PCIe PIO master */ + value = csr_readl(pcie, PAB_PEX_PIO_CTRL); + value |= 1 << PIO_ENABLE_SHIFT; + 
csr_writel(pcie, value, PAB_PEX_PIO_CTRL); /* * we'll program one outbound window for config reads and @@ -535,32 +624,38 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) */ /* config outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE, - resource_size(pcie->ob_io_res)); + program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0, + CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); /* memory inbound translation window */ - program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { - type = 0; + resource_list_for_each_entry(win, &pcie->resources) { if (resource_type(win->res) == IORESOURCE_MEM) type = MEM_WINDOW_TYPE; - if (resource_type(win->res) == IORESOURCE_IO) + else if (resource_type(win->res) == IORESOURCE_IO) type = IO_WINDOW_TYPE; - if (type) { - /* configure outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - win->res->start, 0, type, - resource_size(win->res)); - } + else + continue; + + /* configure outbound translation window */ + program_ob_windows(pcie, pcie->ob_wins_configured, + win->res->start, + win->res->start - win->offset, + type, resource_size(win->res)); } + /* fixup for PCIe class register */ + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); + value &= 0xff; + value |= (PCI_CLASS_BRIDGE_PCI << 16); + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); + /* setup MSI hardware registers */ mobiveil_pcie_enable_msi(pcie); - return err; + return 0; } static void mobiveil_mask_intx_irq(struct irq_data *data) @@ -574,7 +669,8 @@ static void mobiveil_mask_intx_irq(struct irq_data *data) mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB); + shifted_val &= ~mask; + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } @@ -589,7 +685,8 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data) mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB); + shifted_val |= mask; + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } @@ -603,10 +700,11 @@ static struct irq_chip intx_irq_chip = { /* routine to setup the INTx related data */ static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) + irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); + return 0; } @@ -623,7 +721,7 @@ static struct irq_chip mobiveil_msi_irq_chip = { static struct msi_domain_info mobiveil_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), + MSI_FLAG_PCI_MSIX), .chip = &mobiveil_msi_irq_chip, }; @@ -641,7 +739,7 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) } static int mobiveil_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) + const struct cpumask 
*mask, bool force) { return -EINVAL; } @@ -653,7 +751,8 @@ static struct irq_chip mobiveil_msi_bottom_irq_chip = { }; static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs, void *args) + unsigned int virq, + unsigned int nr_irqs, void *args) { struct mobiveil_pcie *pcie = domain->host_data; struct mobiveil_msi *msi = &pcie->msi; @@ -673,13 +772,13 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, mutex_unlock(&msi->lock); irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, - domain->host_data, handle_level_irq, - NULL, NULL); + domain->host_data, handle_level_irq, NULL, NULL); return 0; } static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) + unsigned int virq, + unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); @@ -687,12 +786,11 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, mutex_lock(&msi->lock); - if (!test_bit(d->hwirq, msi->msi_irq_in_use)) { + if (!test_bit(d->hwirq, msi->msi_irq_in_use)) dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", d->hwirq); - } else { + else __clear_bit(d->hwirq, msi->msi_irq_in_use); - } mutex_unlock(&msi->lock); } @@ -716,12 +814,14 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) } msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &mobiveil_msi_domain_info, msi->dev_domain); + &mobiveil_msi_domain_info, + msi->dev_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } + return 0; } @@ -732,12 +832,12 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) int ret; /* setup INTx */ - pcie->intx_domain = irq_domain_add_linear(node, - PCI_NUM_INTX, &intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; + return -ENOMEM; } raw_spin_lock_init(&pcie->intx_mask_lock); @@ -763,11 +863,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) /* allocate the PCIe port */ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) - return -ENODEV; + return -ENOMEM; pcie = pci_host_bridge_priv(bridge); - if (!pcie) - return -ENOMEM; pcie->pdev = pdev; @@ -784,7 +882,7 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) &pcie->resources, &iobase); if (ret) { dev_err(dev, "Getting bridge resources failed\n"); - return -ENOMEM; + return ret; } /* @@ -797,9 +895,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) goto error; } - /* fixup for PCIe class register */ - csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); - /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); if (ret) { @@ -807,6 +902,8 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) goto error; } + irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); + ret = devm_request_pci_bus_resources(dev, &pcie->resources); if (ret) goto error; @@ -820,6 +917,12 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) bridge->map_irq = of_irq_parse_and_map_pci; bridge->swizzle_irq = pci_common_swizzle; + ret = mobiveil_bringup_link(pcie); + if (ret) { + dev_info(dev, "link bring-up failed\n"); + goto error; + } + /* setup the kernel resources for the newly 
added PCIe root bus */ ret = pci_scan_root_bus_bridge(bridge); if (ret) @@ -848,10 +951,10 @@ MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); static struct platform_driver mobiveil_pcie_driver = { .probe = mobiveil_pcie_probe, .driver = { - .name = "mobiveil-pcie", - .of_match_table = mobiveil_pcie_of_match, - .suppress_bind_attrs = true, - }, + .name = "mobiveil-pcie", + .of_match_table = mobiveil_pcie_of_match, + .suppress_bind_attrs = true, + }, }; builtin_platform_driver(mobiveil_pcie_driver); diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index c8febb009454..f6a669a9af41 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -46,14 +46,15 @@ /* Transfer control */ #define PCIETCTLR 0x02000 -#define CFINIT 1 +#define DL_DOWN BIT(3) +#define CFINIT BIT(0) #define PCIETSTR 0x02004 -#define DATA_LINK_ACTIVE 1 +#define DATA_LINK_ACTIVE BIT(0) #define PCIEERRFR 0x02020 #define UNSUPPORTED_REQUEST BIT(4) #define PCIEMSIFR 0x02044 #define PCIEMSIALR 0x02048 -#define MSIFE 1 +#define MSIFE BIT(0) #define PCIEMSIAUR 0x0204c #define PCIEMSIIER 0x02050 @@ -94,6 +95,7 @@ #define MACCTLR 0x011058 #define SPEED_CHANGE BIT(24) #define SCRAMBLE_DISABLE BIT(27) +#define PMSR 0x01105c #define MACS2R 0x011078 #define MACCGSPSETR 0x011084 #define SPCNGRSN BIT(31) @@ -152,14 +154,13 @@ struct rcar_pcie { struct rcar_msi msi; }; -static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, - unsigned long reg) +static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, + unsigned int reg) { writel(val, pcie->base + reg); } -static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, - unsigned long reg) +static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg) { return readl(pcie->base + reg); } @@ -171,7 +172,7 @@ enum { static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) { - int shift = 8 * (where & 3); + unsigned int shift = BITS_PER_BYTE * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); val &= ~(mask << shift); @@ -181,7 +182,7 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) { - int shift = 8 * (where & 3); + unsigned int shift = BITS_PER_BYTE * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); return val >> shift; @@ -192,7 +193,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *data) { - int dev, func, reg, index; + unsigned int dev, func, reg, index; dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); @@ -281,12 +282,12 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, } if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; + *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; else if (size == 2) - *val = (*val >> (8 * (where & 2))) & 0xffff; + *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff; - dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", - bus->number, devfn, where, size, (unsigned long)*val); + dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", + bus->number, devfn, where, size, *val); return ret; } @@ -296,23 +297,24 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct rcar_pcie *pcie = bus->sysdata; - int shift, ret; + unsigned int shift; u32 data; + int ret; ret 
= rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; - dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", - bus->number, devfn, where, size, (unsigned long)val); + dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", + bus->number, devfn, where, size, val); if (size == 1) { - shift = 8 * (where & 3); + shift = BITS_PER_BYTE * (where & 3); data &= ~(0xff << shift); data |= ((val & 0xff) << shift); } else if (size == 2) { - shift = 8 * (where & 2); + shift = BITS_PER_BYTE * (where & 2); data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); } else @@ -507,10 +509,10 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie) } static void phy_write_reg(struct rcar_pcie *pcie, - unsigned int rate, unsigned int addr, - unsigned int lane, unsigned int data) + unsigned int rate, u32 addr, + unsigned int lane, u32 data) { - unsigned long phyaddr; + u32 phyaddr; phyaddr = WRITE_CMD | ((rate & 1) << RATE_POS) | @@ -738,15 +740,15 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) while (reg) { unsigned int index = find_first_bit(®, 32); - unsigned int irq; + unsigned int msi_irq; /* clear the interrupt */ rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); - irq = irq_find_mapping(msi->domain, index); - if (irq) { + msi_irq = irq_find_mapping(msi->domain, index); + if (msi_irq) { if (test_bit(index, msi->used)) - generic_handle_irq(irq); + generic_handle_irq(msi_irq); else dev_info(dev, "unhandled MSI\n"); } else { @@ -890,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; struct rcar_msi *msi = &pcie->msi; - unsigned long base; + phys_addr_t base; int err, i; mutex_init(&msi->lock); @@ -929,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) /* setup MSI data target */ msi->pages = __get_free_pages(GFP_KERNEL, 0); + if (!msi->pages) { + err = -ENOMEM; + goto err; + } base = virt_to_phys((void *)msi->pages); - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); + rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); /* enable all MSI interrupts */ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); @@ -1118,7 +1124,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rcar_pcie *pcie; - unsigned int data; + u32 data; int err; int (*phy_init_fn)(struct rcar_pcie *); struct pci_host_bridge *bridge; @@ -1130,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; + platform_set_drvdata(pdev, pcie); err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); if (err) @@ -1221,10 +1228,28 @@ err_free_bridge: return err; } +static int rcar_pcie_resume_noirq(struct device *dev) +{ + struct rcar_pcie *pcie = dev_get_drvdata(dev); + + if (rcar_pci_read_reg(pcie, PMSR) && + !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) + return 0; + + /* Re-establish the PCIe link */ + rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); + return rcar_pcie_wait_for_dl(pcie); +} + +static const struct dev_pm_ops rcar_pcie_pm_ops = { + .resume_noirq = rcar_pcie_resume_noirq, +}; + static struct platform_driver rcar_pcie_driver = { .driver = { .name = "rcar-pcie", .of_match_table = rcar_pcie_of_match, + .pm = &rcar_pcie_pm_ops, .suppress_bind_attrs = true, }, 
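A subtle fix above: rcar_pcie_enable_msi() now splits the MSI page's physical address with lower_32_bits()/upper_32_bits(). The old code wrote the full base (silently truncated by writel()) to PCIEMSIALR and hard-coded PCIEMSIAUR to zero, which breaks if the MSI page is allocated above 4 GiB. The idiom, using the driver's accessors:

/* Program a 64-bit MSI target address into a 32-bit register pair. */
static void set_msi_target(struct rcar_pcie *pcie, phys_addr_t base)
{
	/* MSIFE (bit 0) enables MSI reception at this address. */
	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
}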
.probe = rcar_pcie_probe, diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index a5d799e2dff2..d743b0a48988 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -350,7 +350,7 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, struct rockchip_pcie *rockchip = &ep->rockchip; u32 r = ep->max_regions - 1; u32 offset; - u16 status; + u32 status; u8 msg_code; if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 1372d270764f..ef8e677ce9d1 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); if (IS_ERR(rockchip->vpcie12v)) { - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) + return PTR_ERR(rockchip->vpcie12v); dev_info(dev, "no vpcie12v regulator found\n"); } rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) + return PTR_ERR(rockchip->vpcie3v3); dev_info(dev, "no vpcie3v3 regulator found\n"); } rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); if (IS_ERR(rockchip->vpcie1v8)) { - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV) + return PTR_ERR(rockchip->vpcie1v8); dev_info(dev, "no vpcie1v8 regulator found\n"); } rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); if (IS_ERR(rockchip->vpcie0v9)) { - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV) + return PTR_ERR(rockchip->vpcie0v9); dev_info(dev, "no vpcie0v9 regulator found\n"); } @@ -724,6 +724,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &intx_domain_ops, rockchip); + of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); return -EINVAL; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 81538d77f790..45c0f344ccd1 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -438,11 +438,10 @@ static const struct irq_domain_ops legacy_domain_ops = { #ifdef CONFIG_PCI_MSI static struct irq_chip nwl_msi_irq_chip = { .name = "nwl_pcie:msi", - .irq_enable = unmask_msi_irq, - .irq_disable = mask_msi_irq, - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, - + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info nwl_msi_domain_info = { @@ -483,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, int i; mutex_lock(&msi->lock); - bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, - nr_irqs, 0); - if (bit >= INT_PCI_MSI_NR) { + bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, + get_count_order(nr_irqs)); + if (bit < 0) { mutex_unlock(&msi->lock); return 
-ENOSPC; } - bitmap_set(msi->bitmap, bit, nr_irqs); - for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, domain->host_data, handle_simple_irq, @@ -509,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct nwl_msi *msi = &pcie->msi; mutex_lock(&msi->lock); - bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); + bitmap_release_region(msi->bitmap, data->hwirq, + get_count_order(nr_irqs)); mutex_unlock(&msi->lock); } diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 9bd1a35cd5d8..5bf3af3b28e6 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = { * xilinx_pcie_enable_msi - Enable MSI support * @port: PCIe port information */ -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) { phys_addr_t msg_addr; port->msi_pages = __get_free_pages(GFP_KERNEL, 0); + if (!port->msi_pages) + return -ENOMEM; + msg_addr = virt_to_phys((void *)port->msi_pages); pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + + return 0; } /* INTx Functions */ @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; + int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - xilinx_pcie_enable_msi(port); + ret = xilinx_pcie_enable_msi(port); + if (ret) + return ret; } return 0; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index cf6816b55b5e..a35d3f3996d7 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -31,6 +31,9 @@ #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) +#define MB2_SHADOW_OFFSET 0x2000 +#define MB2_SHADOW_SIZE 16 + enum vmd_features { /* * Device may contain registers which hint the physical location of the @@ -94,11 +97,10 @@ struct vmd_dev { struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; + u8 busn_start; -#ifdef CONFIG_X86_DEV_DMA_OPS struct dma_map_ops dma_ops; struct dma_domain dma_domain; -#endif }; static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) @@ -293,7 +295,6 @@ static struct msi_domain_info vmd_msi_domain_info = { .chip = &vmd_msi_controller, }; -#ifdef CONFIG_X86_DEV_DMA_OPS /* * VMD replaces the requester ID with its own. 
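The allocator change in pcie-xilinx-nwl.c just above is about multi-MSI correctness: bitmap_find_free_region() hands out a naturally aligned power-of-two block, which multi-message MSI requires, whereas the old bitmap_find_next_zero_area() call made no alignment guarantee. A sketch of the alloc/free pairing, using the same kernel bitmap API:

#include <linux/bitmap.h>
#include <linux/log2.h>

/* Reserve nr_irqs hwirqs as one aligned 2^order block; e.g. a request
 * for 3 vectors has get_count_order(3) == 2 and so reserves 4 bits. */
static int msi_hwirq_alloc(unsigned long *map, int nbits, int nr_irqs)
{
	/* Returns the first bit of the region, or a negative errno. */
	return bitmap_find_free_region(map, nbits, get_count_order(nr_irqs));
}

static void msi_hwirq_free(unsigned long *map, int hwirq, int nr_irqs)
{
	bitmap_release_region(map, hwirq, get_count_order(nr_irqs));
}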
DMA mappings for devices in a * VMD domain need to be mapped for the VMD, not the device requiring @@ -438,16 +439,13 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) add_dma_domain(domain); } #undef ASSIGN_VMD_DMA_OPS -#else -static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} -static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} -#endif static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { char __iomem *addr = vmd->cfgbar + - (bus->number << 20) + (devfn << 12) + reg; + ((bus->number - vmd->busn_start) << 20) + + (devfn << 12) + reg; if ((addr - vmd->cfgbar) + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) @@ -570,7 +568,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; - resource_size_t membar2_offset = 0x2000, busn_start = 0; + resource_size_t membar2_offset = 0x2000; struct pci_bus *child; /* @@ -583,7 +581,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) u32 vmlock; int ret; - membar2_offset = 0x2018; + membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); if (ret || vmlock == ~0) return -ENODEV; @@ -595,9 +593,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + 0x2008); + readq(membar2 + MB2_SHADOW_OFFSET); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + 0x2010); + readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(vmd->dev, membar2); } } @@ -613,14 +611,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); if (BUS_RESTRICT_CAP(vmcap) && (BUS_RESTRICT_CFG(vmconfig) == 0x1)) - busn_start = 128; + vmd->busn_start = 128; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", - .start = busn_start, - .end = busn_start + (resource_size(res) >> 20) - 1, + .start = vmd->busn_start, + .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; @@ -634,7 +632,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) * 32-bit resources. __pci_assign_resource() enforces that * artificial restriction to make sure everything will fit. * - * The only way we could use a 64-bit non-prefechable MEMBAR is + * The only way we could use a 64-bit non-prefetchable MEMBAR is * if its address is <4GB so that we can convert it to a 32-bit * resource. 
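vmd_cfg_addr() above is plain ECAM arithmetic with one twist: the bus number is taken relative to the new busn_start field, so parts whose bus range is restricted to start at 128 still index from the beginning of CFGBAR. The layout is 1 MiB (1 << 20) per bus and 4 KiB (1 << 12) per function:

/* ECAM-style offset into the VMD CFGBAR, relative to busn_start. */
static void __iomem *ecam_addr(void __iomem *cfgbar, u8 busn_start,
			       u8 bus, unsigned int devfn, int reg)
{
	return cfgbar + ((bus - busn_start) << 20) + (devfn << 12) + reg;
}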
To be visible to the host OS, all VMD endpoints must * be initially configured by platform BIOS, which includes setting @@ -688,8 +686,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); - vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, - sd, &resources); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, + &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index d0b91da49bf4..1cfe3687a211 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -381,15 +381,15 @@ static void pci_epf_test_unbind(struct pci_epf *epf) epf_bar = &epf->bar[bar]; if (epf_test->reg[bar]) { - pci_epf_free_space(epf, epf_test->reg[bar], bar); pci_epc_clear_bar(epc, epf->func_no, epf_bar); + pci_epf_free_space(epf, epf_test->reg[bar], bar); } } } static int pci_epf_test_set_bar(struct pci_epf *epf) { - int bar; + int bar, add; int ret; struct pci_epf_bar *epf_bar; struct pci_epc *epc = epf->epc; @@ -400,8 +400,14 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) epc_features = epf_test->epc_features; - for (bar = BAR_0; bar <= BAR_5; bar++) { + for (bar = BAR_0; bar <= BAR_5; bar += add) { epf_bar = &epf->bar[bar]; + /* + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 + * if the specific implementation required a 64-bit BAR, + * even if we only requested a 32-bit BAR. + */ + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; if (!!(epc_features->reserved_bar & (1 << bar))) continue; @@ -413,13 +419,6 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) if (bar == test_reg_bar) return ret; } - /* - * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 - * if the specific implementation required a 64-bit BAR, - * even if we only requested a 32-bit BAR. - */ - if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) - bar++; } return 0; @@ -431,35 +430,42 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) struct device *dev = &epf->dev; struct pci_epf_bar *epf_bar; void *base; - int bar; + int bar, add; enum pci_barno test_reg_bar = epf_test->test_reg_bar; const struct pci_epc_features *epc_features; + size_t test_reg_size; epc_features = epf_test->epc_features; - base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), - test_reg_bar); + if (epc_features->bar_fixed_size[test_reg_bar]) + test_reg_size = bar_size[test_reg_bar]; + else + test_reg_size = sizeof(struct pci_epf_test_reg); + + base = pci_epf_alloc_space(epf, test_reg_size, + test_reg_bar, epc_features->align); if (!base) { dev_err(dev, "Failed to allocated register space\n"); return -ENOMEM; } epf_test->reg[test_reg_bar] = base; - for (bar = BAR_0; bar <= BAR_5; bar++) { + for (bar = BAR_0; bar <= BAR_5; bar += add) { epf_bar = &epf->bar[bar]; + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 
2 : 1; + if (bar == test_reg_bar) continue; if (!!(epc_features->reserved_bar & (1 << bar))) continue; - base = pci_epf_alloc_space(epf, bar_size[bar], bar); + base = pci_epf_alloc_space(epf, bar_size[bar], bar, + epc_features->align); if (!base) dev_err(dev, "Failed to allocate space for BAR%d\n", bar); epf_test->reg[bar] = base; - if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) - bar++; } return 0; @@ -591,6 +597,11 @@ static int __init pci_epf_test_init(void) kpcitest_workqueue = alloc_workqueue("kpcitest", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!kpcitest_workqueue) { + pr_err("Failed to allocate the kpcitest work queue\n"); + return -ENOMEM; + } + ret = pci_epf_register_driver(&test_driver); if (ret) { pr_err("Failed to register pci epf test driver --> %d\n", ret); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index e4712a0f249c..2091508c1620 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -519,11 +519,12 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) { unsigned long flags; - if (!epc || IS_ERR(epc)) + if (!epc || IS_ERR(epc) || !epf) return; spin_lock_irqsave(&epc->lock, flags); list_del(&epf->list); + epf->epc = NULL; spin_unlock_irqrestore(&epc->lock, flags); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 8bfdcd291196..fb1306de8f40 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space); * pci_epf_alloc_space() - allocate memory for the PCI EPF register space * @size: the size of the memory that has to be allocated * @bar: the BAR number corresponding to the allocated register space + * @align: alignment size for the allocation region * * Invoke to allocate memory for the PCI EPF register space. */ -void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) +void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, + size_t align) { void *space; struct device *dev = epf->epc->dev.parent; @@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) if (size < 128) size = 128; - size = roundup_pow_of_two(size); + + if (align) + size = ALIGN(size, align); + else + size = roundup_pow_of_two(size); space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); if (!space) { diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index e9f78eb390d2..e7b493c22bf3 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -147,15 +147,6 @@ config HOTPLUG_PCI_RPA_DLPAR When in doubt, say N. -config HOTPLUG_PCI_SGI - tristate "SGI PCI Hotplug Support" - depends on IA64_SGI_SN2 || IA64_GENERIC - help - Say Y here if you want to use the SGI Altix Hotplug - Driver for PCI devices. - - When in doubt, say N. 
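The new align argument to pci_epf_alloc_space() above exists because some endpoint controllers constrain BAR allocations to a specific granularity rather than a power of two. The sizing rule reduces to the following (a sketch of the logic, not the exact function):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/log2.h>		/* roundup_pow_of_two() */

static size_t epf_space_size(size_t size, size_t align)
{
	if (size < 128)		/* historical floor kept by the driver */
		size = 128;

	/* A controller-mandated granularity wins over power-of-two rounding. */
	return align ? ALIGN(size, align) : roundup_pow_of_two(size);
}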
- config HOTPLUG_PCI_S390 bool "System z PCI Hotplug Support" depends on S390 && 64BIT diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 7e3331603714..5196983220df 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o obj-$(CONFIG_HOTPLUG_PCI_POWERNV) += pnv-php.o obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o -obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 603eadf3d965..d0559d2faf50 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -563,7 +563,6 @@ cleanup_slots(void) } cleanup_null: up_write(&list_rwsem); - return; } int diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 16bbb183695a..b8aacb41a83c 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -173,7 +173,6 @@ static void pci_print_IRQ_route(void) dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); } - return; } diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index b7f4e1f099d9..68de958a9be8 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1872,8 +1872,6 @@ static void interrupt_event_handler(struct controller *ctrl) } } /* End of FOR loop */ } - - return; } @@ -1943,8 +1941,6 @@ void cpqhp_pushbutton_thread(struct timer_list *t) p_slot->state = STATIC_STATE; } - - return; } diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h index 918ff8dbfe62..70e879b6a23f 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.h +++ b/drivers/pci/hotplug/cpqphp_nvram.h @@ -16,10 +16,7 @@ #ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM -static inline void compaq_nvram_init(void __iomem *rom_start) -{ - return; -} +static inline void compaq_nvram_init(void __iomem *rom_start) { } static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl) { diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index 5e8caf7a4452..5c93aa14f0de 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -1941,6 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus) break; case PCI_HEADER_TYPE_BRIDGE: function = 0x8; + /* fall through */ case PCI_HEADER_TYPE_MULTIBRIDGE: /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 506e1d923a1f..654c972b8ea0 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -25,36 +25,21 @@ #include "../pcie/portdrv.h" -#define MY_NAME "pciehp" - extern bool pciehp_poll_mode; extern int pciehp_poll_time; -extern bool pciehp_debug; - -#define dbg(format, arg...) \ -do { \ - if (pciehp_debug) \ - printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \ -} while (0) -#define err(format, arg...) \ - printk(KERN_ERR "%s: " format, MY_NAME, ## arg) -#define info(format, arg...) \ - printk(KERN_INFO "%s: " format, MY_NAME, ## arg) -#define warn(format, arg...) \ - printk(KERN_WARNING "%s: " format, MY_NAME, ## arg) +/* + * Set CONFIG_DYNAMIC_DEBUG=y and boot with 'dyndbg="file pciehp* +p"' to + * enable debug messages. 
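With the pciehp_debug parameter gone, the usual dynamic-debug controls take over; besides the boot parameter quoted in the comment above, the same messages can be toggled at runtime through debugfs:

/*
 * Runtime equivalent of the dyndbg= boot parameter, assuming
 * CONFIG_DYNAMIC_DEBUG=y and debugfs mounted at /sys/kernel/debug:
 *
 *	echo 'file pciehp* +p' > /sys/kernel/debug/dynamic_debug/control
 */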
+ */ #define ctrl_dbg(ctrl, format, arg...) \ - do { \ - if (pciehp_debug) \ - dev_printk(KERN_DEBUG, &ctrl->pcie->device, \ - format, ## arg); \ - } while (0) + pci_dbg(ctrl->pcie->port, format, ## arg) #define ctrl_err(ctrl, format, arg...) \ - dev_err(&ctrl->pcie->device, format, ## arg) + pci_err(ctrl->pcie->port, format, ## arg) #define ctrl_info(ctrl, format, arg...) \ - dev_info(&ctrl->pcie->device, format, ## arg) + pci_info(ctrl->pcie->port, format, ## arg) #define ctrl_warn(ctrl, format, arg...) \ - dev_warn(&ctrl->pcie->device, format, ## arg) + pci_warn(ctrl->pcie->port, format, ## arg) #define SLOT_NAME_SIZE 10 @@ -125,9 +110,9 @@ struct controller { * * @OFF_STATE: slot is powered off, no subordinate devices are enumerated * @BLINKINGON_STATE: slot will be powered on after the 5 second delay, - * green led is blinking + * Power Indicator is blinking * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay, - * green led is blinking + * Power Indicator is blinking * @POWERON_STATE: slot is currently powering on * @POWEROFF_STATE: slot is currently powering off * @ON_STATE: slot is powered on, subordinate devices have been enumerated @@ -182,12 +167,11 @@ int pciehp_power_on_slot(struct controller *ctrl); void pciehp_power_off_slot(struct controller *ctrl); void pciehp_get_power_status(struct controller *ctrl, u8 *status); -void pciehp_set_attention_status(struct controller *ctrl, u8 status); +#define INDICATOR_NOOP -1 /* Leave indicator unchanged */ +void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn); + void pciehp_get_latch_status(struct controller *ctrl, u8 *status); int pciehp_query_power_fault(struct controller *ctrl); -void pciehp_green_led_on(struct controller *ctrl); -void pciehp_green_led_off(struct controller *ctrl); -void pciehp_green_led_blink(struct controller *ctrl); bool pciehp_card_present(struct controller *ctrl); bool pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index fc5366b50e95..b3122c151b80 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -17,6 +17,9 @@ * Dely Sy <[email protected]>" */ +#define pr_fmt(fmt) "pciehp: " fmt +#define dev_fmt pr_fmt + #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -27,7 +30,6 @@ #include "../pci.h" /* Global variables */ -bool pciehp_debug; bool pciehp_poll_mode; int pciehp_poll_time; @@ -35,15 +37,11 @@ int pciehp_poll_time; * not really modular, but the easiest way to keep compat with existing * bootargs behaviour is to continue using module_param here. 
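The pr_fmt/dev_fmt definitions added at the top of pciehp_core.c above prefix every pr_*() and dev_*() message from the file; they must be defined before the printk-related headers are pulled in to take effect. A minimal illustration:

#define pr_fmt(fmt) "pciehp: " fmt	/* must precede the includes */

#include <linux/printk.h>

static void report_link(void)
{
	pr_info("link up\n");	/* emits: "pciehp: link up" */
}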
*/ -module_param(pciehp_debug, bool, 0644); module_param(pciehp_poll_mode, bool, 0644); module_param(pciehp_poll_time, int, 0644); -MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); -#define PCIE_MODULE_NAME "pciehp" - static int set_attention_status(struct hotplug_slot *slot, u8 value); static int get_power_status(struct hotplug_slot *slot, u8 *value); static int get_latch_status(struct hotplug_slot *slot, u8 *value); @@ -97,15 +95,20 @@ static void cleanup_slot(struct controller *ctrl) } /* - * set_attention_status - Turns the Amber LED for a slot on, off or blink + * set_attention_status - Turns the Attention Indicator on, off or blinking */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; + if (status) + status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT; + else + status = PCI_EXP_SLTCTL_ATTN_IND_OFF; + pci_config_pm_runtime_get(pdev); - pciehp_set_attention_status(ctrl, status); + pciehp_set_indicators(ctrl, INDICATOR_NOOP, status); pci_config_pm_runtime_put(pdev); return 0; } @@ -182,14 +185,14 @@ static int pciehp_probe(struct pcie_device *dev) if (!dev->port->subordinate) { /* Can happen if we run out of bus numbers during probe */ - dev_err(&dev->device, + pci_err(dev->port, "Hotplug bridge without secondary bus, ignoring\n"); return -ENODEV; } ctrl = pcie_init(dev); if (!ctrl) { - dev_err(&dev->device, "Controller initialization failed\n"); + pci_err(dev->port, "Controller initialization failed\n"); return -ENODEV; } set_service_data(dev, ctrl); @@ -307,7 +310,7 @@ static int pciehp_runtime_resume(struct pcie_device *dev) #endif /* PM */ static struct pcie_port_service_driver hpdriver_portdrv = { - .name = PCIE_MODULE_NAME, + .name = "pciehp", .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_HP, @@ -328,9 +331,9 @@ int __init pcie_hp_init(void) int retval = 0; retval = pcie_port_service_register(&hpdriver_portdrv); - dbg("pcie_port_service_register = %d\n", retval); + pr_debug("pcie_port_service_register = %d\n", retval); if (retval) - dbg("Failure to register service\n"); + pr_debug("Failure to register service\n"); return retval; } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 3f3df4c29f6e..21af7b16d7a4 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -13,6 +13,8 @@ * */ +#define dev_fmt(fmt) "pciehp: " fmt + #include <linux/kernel.h> #include <linux/types.h> #include <linux/pm_runtime.h> @@ -28,7 +30,10 @@ static void set_slot_off(struct controller *ctrl) { - /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ + /* + * Turn off slot, turn on attention indicator, turn off power + * indicator + */ if (POWER_CTRL(ctrl)) { pciehp_power_off_slot(ctrl); @@ -40,8 +45,8 @@ static void set_slot_off(struct controller *ctrl) msleep(1000); } - pciehp_green_led_off(ctrl); - pciehp_set_attention_status(ctrl, 1); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_ON); } /** @@ -63,7 +68,8 @@ static int board_added(struct controller *ctrl) return retval; } - pciehp_green_led_blink(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, + INDICATOR_NOOP); /* Check link training status */ retval = pciehp_check_link_status(ctrl); @@ -88,8 +94,8 @@ static 
int board_added(struct controller *ctrl) } } - pciehp_green_led_on(ctrl); - pciehp_set_attention_status(ctrl, 0); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, + PCI_EXP_SLTCTL_ATTN_IND_OFF); return 0; err_exit: @@ -98,7 +104,7 @@ err_exit: } /** - * remove_board - Turns off slot and LEDs + * remove_board - Turn off slot and Power Indicator * @ctrl: PCIe hotplug controller where board is being removed * @safe_removal: whether the board is safely removed (versus surprise removed) */ @@ -115,10 +121,14 @@ static void remove_board(struct controller *ctrl, bool safe_removal) * removed from the slot/adapter. */ msleep(1000); + + /* Ignore link or presence changes caused by power off */ + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), + &ctrl->pending_events); } - /* turn off Green LED */ - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + INDICATOR_NOOP); } static int pciehp_enable_slot(struct controller *ctrl); @@ -165,9 +175,9 @@ void pciehp_handle_button_press(struct controller *ctrl) ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n", slot_name(ctrl)); } - /* blink green LED and turn off amber */ - pciehp_green_led_blink(ctrl); - pciehp_set_attention_status(ctrl, 0); + /* blink power indicator and turn off attention */ + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, + PCI_EXP_SLTCTL_ATTN_IND_OFF); schedule_delayed_work(&ctrl->button_work, 5 * HZ); break; case BLINKINGOFF_STATE: @@ -181,12 +191,13 @@ void pciehp_handle_button_press(struct controller *ctrl) cancel_delayed_work(&ctrl->button_work); if (ctrl->state == BLINKINGOFF_STATE) { ctrl->state = ON_STATE; - pciehp_green_led_on(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, + PCI_EXP_SLTCTL_ATTN_IND_OFF); } else { ctrl->state = OFF_STATE; - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_OFF); } - pciehp_set_attention_status(ctrl, 0); ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n", slot_name(ctrl)); break; @@ -304,7 +315,9 @@ static int pciehp_enable_slot(struct controller *ctrl) pm_runtime_get_sync(&ctrl->pcie->port->dev); ret = __pciehp_enable_slot(ctrl); if (ret && ATTN_BUTTN(ctrl)) - pciehp_green_led_off(ctrl); /* may be blinking */ + /* may be blinking */ + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + INDICATOR_NOOP); pm_runtime_put(&ctrl->pcie->port->dev); mutex_lock(&ctrl->state_lock); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6a2365cd794e..1a522c1c4177 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -12,6 +12,8 @@ * Send feedback to <[email protected]>,<[email protected]> */ +#define dev_fmt(fmt) "pciehp: " fmt + #include <linux/kernel.h> #include <linux/types.h> #include <linux/jiffies.h> @@ -46,7 +48,7 @@ static inline int pciehp_request_irq(struct controller *ctrl) /* Installs the interrupt handler */ retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist, - IRQF_SHARED, MY_NAME, ctrl); + IRQF_SHARED, "pciehp", ctrl); if (retval) ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", irq); @@ -232,8 +234,8 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) delay -= step; } while (delay > 0); - if (count > 1 && pciehp_debug) - printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n", + if (count > 1) + pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get 
%08x\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), count, step, l); @@ -416,65 +418,40 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot, return 0; } -void pciehp_set_attention_status(struct controller *ctrl, u8 value) +/** + * pciehp_set_indicators() - set attention indicator, power indicator, or both + * @ctrl: PCIe hotplug controller + * @pwr: one of: + * PCI_EXP_SLTCTL_PWR_IND_ON + * PCI_EXP_SLTCTL_PWR_IND_BLINK + * PCI_EXP_SLTCTL_PWR_IND_OFF + * @attn: one of: + * PCI_EXP_SLTCTL_ATTN_IND_ON + * PCI_EXP_SLTCTL_ATTN_IND_BLINK + * PCI_EXP_SLTCTL_ATTN_IND_OFF + * + * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator + * unchanged. + */ +void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn) { - u16 slot_cmd; + u16 cmd = 0, mask = 0; - if (!ATTN_LED(ctrl)) - return; - - switch (value) { - case 0: /* turn off */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF; - break; - case 1: /* turn on */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON; - break; - case 2: /* turn blink */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK; - break; - default: - return; + if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) { + cmd |= (pwr & PCI_EXP_SLTCTL_PIC); + mask |= PCI_EXP_SLTCTL_PIC; } - pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); -} - -void pciehp_green_led_on(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; - - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_ON); -} - -void pciehp_green_led_off(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; - - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_OFF); -} -void pciehp_green_led_blink(struct controller *ctrl) -{ - if (!PWR_LED(ctrl)) - return; + if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) { + cmd |= (attn & PCI_EXP_SLTCTL_AIC); + mask |= PCI_EXP_SLTCTL_AIC; + } - pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, - PCI_EXP_SLTCTL_PIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PWR_IND_BLINK); + if (cmd) { + pcie_write_cmd_nowait(ctrl, cmd, mask); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, + pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); + } } int pciehp_power_on_slot(struct controller *ctrl) @@ -636,8 +613,8 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { ctrl->power_fault_detected = 1; ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); - pciehp_set_attention_status(ctrl, 1); - pciehp_green_led_off(ctrl); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_ATTN_IND_ON); } /* @@ -822,14 +799,11 @@ static inline void dbg_ctrl(struct controller *ctrl) struct pci_dev *pdev = ctrl->pcie->port; u16 reg16; - if (!pciehp_debug) - return; - - ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); + ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16); - ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); + ctrl_dbg(ctrl, 
"Slot Status : 0x%04x\n", reg16); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16); - ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); + ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16); } #define FLAG(x, y) (((x) & (y)) ? '+' : '-') diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index b9c1396db6fe..d17f3bf36f70 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -13,6 +13,8 @@ * */ +#define dev_fmt(fmt) "pciehp: " fmt + #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 6758fd7c382e..d7b2b47bc33e 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -419,9 +419,21 @@ static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state) static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + struct pci_dev *bridge = php_slot->pdev; + u16 new, mask; - /* FIXME: Make it real once firmware supports it */ php_slot->attention_state = state; + if (!bridge) + return 0; + + mask = PCI_EXP_SLTCTL_AIC; + + if (state) + new = PCI_EXP_SLTCTL_ATTN_IND_ON; + else + new = PCI_EXP_SLTCTL_ATTN_IND_OFF; + + pcie_capability_clear_and_set_word(bridge, PCI_EXP_SLTCTL, mask, new); return 0; } @@ -511,6 +523,37 @@ scan: return 0; } +static int pnv_php_reset_slot(struct hotplug_slot *slot, int probe) +{ + struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + struct pci_dev *bridge = php_slot->pdev; + uint16_t sts; + + /* + * The CAPI folks want pnv_php to drive OpenCAPI slots + * which don't have a bridge. Only claim to support + * reset_slot() if we have a bridge device (for now...) + */ + if (probe) + return !bridge; + + /* mask our interrupt while resetting the bridge */ + if (php_slot->irq > 0) + disable_irq(php_slot->irq); + + pci_bridge_secondary_bus_reset(bridge); + + /* clear any state changes that happened due to the reset */ + pcie_capability_read_word(php_slot->pdev, PCI_EXP_SLTSTA, &sts); + sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_capability_write_word(php_slot->pdev, PCI_EXP_SLTSTA, sts); + + if (php_slot->irq > 0) + enable_irq(php_slot->irq); + + return 0; +} + static int pnv_php_enable_slot(struct hotplug_slot *slot) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); @@ -548,6 +591,7 @@ static const struct hotplug_slot_ops php_slot_ops = { .set_attention_status = pnv_php_set_attention_state, .enable_slot = pnv_php_enable_slot, .disable_slot = pnv_php_disable_slot, + .reset_slot = pnv_php_reset_slot, }; static void pnv_php_release(struct pnv_php_slot *php_slot) @@ -721,6 +765,12 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); + + pci_dbg(pdev, "PCI slot [%s]: HP int! 
DLAct: %d, PresDet: %d\n", + php_slot->name, + !!(sts & PCI_EXP_SLTSTA_DLLSC), + !!(sts & PCI_EXP_SLTSTA_PDC)); + if (sts & PCI_EXP_SLTSTA_DLLSC) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); @@ -735,6 +785,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) added = !!(presence == OPAL_PCI_SLOT_PRESENT); } else { + pci_dbg(pdev, "PCI slot [%s]: Spurious IRQ?\n", php_slot->name); return IRQ_NONE; } @@ -955,6 +1006,9 @@ static int __init pnv_php_init(void) for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_register(dn); + for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") + pnv_php_register(dn); + return 0; } @@ -964,6 +1018,9 @@ static void __exit pnv_php_exit(void) for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_unregister(dn); + + for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") + pnv_php_unregister(dn); } module_init(pnv_php_init); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e2356a9c7088..977946e4e613 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) if (rc == 0) break; } + of_node_put(parent); return dn; } @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, return np; } +/* Returns a device_node with its reference count incremented */ static struct device_node *find_dlpar_node(char *drc_name, int *node_type) { struct device_node *dn; @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name) rc = dlpar_add_phb(drc_name, dn); break; } + of_node_put(dn); printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name) rc = dlpar_remove_pci_slot(drc_name, dn); break; } + of_node_put(dn); vm_unmap_aliases(); printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); @@ -469,7 +473,6 @@ int __init rpadlpar_io_init(void) void rpadlpar_io_exit(void) { dlpar_sysfs_exit(); - return; } module_init(rpadlpar_io_init); diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index bcd5d357ca23..951f7f216fb3 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot) return speed; } -static int get_children_props(struct device_node *dn, const int **drc_indexes, - const int **drc_names, const int **drc_types, - const int **drc_power_domains) +static int get_children_props(struct device_node *dn, const __be32 **drc_indexes, + const __be32 **drc_names, const __be32 **drc_types, + const __be32 **drc_power_domains) { - const int *indexes, *names, *types, *domains; + const __be32 *indexes, *names, *types, *domains; indexes = of_get_property(dn, "ibm,drc-indexes", NULL); names = of_get_property(dn, "ibm,drc-names", NULL); @@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, char *drc_type, unsigned int my_index) { char *name_tmp, *type_tmp; - const int *indexes, *names; - const int *types, *domains; + const __be32 *indexes, *names; + const __be32 *types, *domains; int i, rc; rc = get_children_props(dn->parent, &indexes, &names, &types, &domains); @@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, /* Iterate through parent properties, looking for my-drc-index */ for (i = 0; i < be32_to_cpu(indexes[0]); i++) { - if 
((unsigned int) indexes[i + 1] == my_index) + if (be32_to_cpu(indexes[i + 1]) == my_index) break; name_tmp += (strlen(name_tmp) + 1); @@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, struct of_drc_info drc; const __be32 *value; char cell_drc_name[MAX_DRC_NAME_LEN]; - int j, fndit; + int j; info = of_find_property(dn->parent, "ibm,drc-info", NULL); if (info == NULL) @@ -239,23 +239,22 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, value = of_prop_next_u32(info, NULL, &entries); if (!value) return -EINVAL; + else + value++; for (j = 0; j < entries; j++) { of_read_drc_info_cell(&info, &value, &drc); /* Should now know end of current entry */ - if (my_index > drc.last_drc_index) - continue; - - fndit = 1; - break; + /* Found it */ + if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) { + int index = my_index - drc.drc_index_start; + sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, + drc.drc_name_suffix_start + index); + break; + } } - /* Found it */ - - if (fndit) - sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, - my_index); if (((drc_name == NULL) || (drc_name && !strcmp(drc_name, cell_drc_name))) && @@ -269,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, char *drc_type) { - const unsigned int *my_index; + const __be32 *my_index; my_index = of_get_property(dn, "ibm,my-drc-index", NULL); if (!my_index) { @@ -277,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, return -EINVAL; } - if (firmware_has_feature(FW_FEATURE_DRC_INFO)) + if (of_find_property(dn->parent, "ibm,drc-info", NULL)) return rpaphp_check_drc_props_v2(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); else return rpaphp_check_drc_props_v1(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); } EXPORT_SYMBOL_GPL(rpaphp_check_drc_props); @@ -313,10 +312,11 @@ static int is_php_type(char *drc_type) * for built-in pci slots (even when the built-in slots are * dlparable.) */ -static int is_php_dn(struct device_node *dn, const int **indexes, - const int **names, const int **types, const int **power_domains) +static int is_php_dn(struct device_node *dn, const __be32 **indexes, + const __be32 **names, const __be32 **types, + const __be32 **power_domains) { - const int *drc_types; + const __be32 *drc_types; int rc; rc = get_children_props(dn, indexes, names, &drc_types, power_domains); @@ -330,33 +330,55 @@ static int is_php_dn(struct device_node *dn, const int **indexes, return 1; } -/** - * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. - * @dn: device node of slot - * - * This subroutine will register a hotpluggable slot with the - * PCI hotplug infrastructure. This routine is typically called - * during boot time, if the hotplug slots are present at boot time, - * or is called later, by the dlpar add code, if the slot is - * being dynamically added during runtime. - * - * If the device node points at an embedded (built-in) slot, this - * routine will just return without doing anything, since embedded - * slots cannot be hotplugged. - * - * To remove a slot, it suffices to call rpaphp_deregister_slot(). 
- */ -int rpaphp_add_slot(struct device_node *dn) +static int rpaphp_drc_info_add_slot(struct device_node *dn) { struct slot *slot; + struct property *info; + struct of_drc_info drc; + char drc_name[MAX_DRC_NAME_LEN]; + const __be32 *cur; + u32 count; int retval = 0; - int i; - const int *indexes, *names, *types, *power_domains; - char *name, *type; - if (!dn->name || strcmp(dn->name, "pci")) + info = of_find_property(dn, "ibm,drc-info", NULL); + if (!info) return 0; + cur = of_prop_next_u32(info, NULL, &count); + if (cur) + cur++; + else + return 0; + + of_read_drc_info_cell(&info, &cur, &drc); + if (!is_php_type(drc.drc_type)) + return 0; + + sprintf(drc_name, "%s%d", drc.drc_name_prefix, drc.drc_name_suffix_start); + + slot = alloc_slot_struct(dn, drc.drc_index_start, drc_name, drc.drc_power_domain); + if (!slot) + return -ENOMEM; + + slot->type = simple_strtoul(drc.drc_type, NULL, 10); + retval = rpaphp_enable_slot(slot); + if (!retval) + retval = rpaphp_register_slot(slot); + + if (retval) + dealloc_slot_struct(slot); + + return retval; +} + +static int rpaphp_drc_add_slot(struct device_node *dn) +{ + struct slot *slot; + int retval = 0; + int i; + const __be32 *indexes, *names, *types, *power_domains; + char *name, *type; + /* If this is not a hotplug slot, return without doing anything. */ if (!is_php_dn(dn, &indexes, &names, &types, &power_domains)) return 0; @@ -395,6 +417,33 @@ int rpaphp_add_slot(struct device_node *dn) /* XXX FIXME: reports a failure only if last entry in loop failed */ return retval; } + +/** + * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. + * @dn: device node of slot + * + * This subroutine will register a hotpluggable slot with the + * PCI hotplug infrastructure. This routine is typically called + * during boot time, if the hotplug slots are present at boot time, + * or is called later, by the dlpar add code, if the slot is + * being dynamically added during runtime. + * + * If the device node points at an embedded (built-in) slot, this + * routine will just return without doing anything, since embedded + * slots cannot be hotplugged. + * + * To remove a slot, it suffices to call rpaphp_deregister_slot(). 
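The rpaphp conversions above change the property pointers from const int * to const __be32 * and byte-swap at the point of use, since device-tree cells are stored big-endian regardless of host endianness. A sketch of the pattern, with error handling trimmed:

	/* Sketch: read one big-endian cell from a DT property. */
	static int example_read_drc_index(struct device_node *dn, u32 *index)
	{
		const __be32 *prop = of_get_property(dn, "ibm,my-drc-index", NULL);

		if (!prop)
			return -EINVAL;

		*index = be32_to_cpu(*prop);	/* raw cell is big-endian */
		return 0;
	}
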
+ */ +int rpaphp_add_slot(struct device_node *dn) +{ + if (!dn->name || strcmp(dn->name, "pci")) + return 0; + + if (of_find_property(dn, "ibm,drc-info", NULL)) + return rpaphp_drc_info_add_slot(dn); + else + return rpaphp_drc_add_slot(dn); +} EXPORT_SYMBOL_GPL(rpaphp_add_slot); static void __exit cleanup_slots(void) @@ -412,7 +461,6 @@ static void __exit cleanup_slots(void) pci_hp_deregister(&slot->hotplug_slot); dealloc_slot_struct(slot); } - return; } static int __init rpaphp_init(void) diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 5282aa3e33c5..93b4a945c55d 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -21,6 +21,7 @@ /* free up the memory used by a slot */ void dealloc_slot_struct(struct slot *slot) { + of_node_put(slot->dn); kfree(slot->name); kfree(slot); } @@ -36,7 +37,7 @@ struct slot *alloc_slot_struct(struct device_node *dn, slot->name = kstrdup(drc_name, GFP_KERNEL); if (!slot->name) goto error_slot; - slot->dn = dn; + slot->dn = of_node_get(dn); slot->index = drc_index; slot->power_domain = power_domain; slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops; diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c deleted file mode 100644 index 231f5bdd3d2d..000000000000 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ /dev/null @@ -1,700 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved. - * - * This work was based on the 2.4/2.6 kernel development by Dick Reigner. - * Work to add BIOS PROM support was completed by Mike Habeck. - */ - -#include <linux/acpi.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/pci_hotplug.h> -#include <linux/proc_fs.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/mutex.h> - -#include <asm/sn/addrs.h> -#include <asm/sn/geo.h> -#include <asm/sn/l1.h> -#include <asm/sn/module.h> -#include <asm/sn/pcibr_provider.h> -#include <asm/sn/pcibus_provider_defs.h> -#include <asm/sn/pcidev.h> -#include <asm/sn/sn_feature_sets.h> -#include <asm/sn/sn_sal.h> -#include <asm/sn/types.h> -#include <asm/sn/acpi.h> - -#include "../pci.h" - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver"); - - -/* SAL call error codes. 
Keep in sync with prom header io/include/pcibr.h */ -#define PCI_SLOT_ALREADY_UP 2 /* slot already up */ -#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */ -#define PCI_L1_ERR 7 /* L1 console command error */ -#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */ - - -#define PCIIO_ASIC_TYPE_TIOCA 4 -#define PCI_L1_QSIZE 128 /* our L1 message buffer size */ -#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */ -#define SN_SLOT_NAME_SIZE 33 /* size of name string */ - -/* internal list head */ -static struct list_head sn_hp_list; - -/* hotplug_slot struct's private pointer */ -struct slot { - int device_num; - struct pci_bus *pci_bus; - /* this struct for glue internal only */ - struct hotplug_slot hotplug_slot; - struct list_head hp_list; - char physical_path[SN_SLOT_NAME_SIZE]; -}; - -struct pcibr_slot_enable_resp { - int resp_sub_errno; - char resp_l1_msg[PCI_L1_QSIZE + 1]; -}; - -struct pcibr_slot_disable_resp { - int resp_sub_errno; - char resp_l1_msg[PCI_L1_QSIZE + 1]; -}; - -enum sn_pci_req_e { - PCI_REQ_SLOT_ELIGIBLE, - PCI_REQ_SLOT_DISABLE -}; - -static int enable_slot(struct hotplug_slot *slot); -static int disable_slot(struct hotplug_slot *slot); -static inline int get_power_status(struct hotplug_slot *slot, u8 *value); - -static const struct hotplug_slot_ops sn_hotplug_slot_ops = { - .enable_slot = enable_slot, - .disable_slot = disable_slot, - .get_power_status = get_power_status, -}; - -static DEFINE_MUTEX(sn_hotplug_mutex); - -static struct slot *to_slot(struct hotplug_slot *bss_hotplug_slot) -{ - return container_of(bss_hotplug_slot, struct slot, hotplug_slot); -} - -static ssize_t path_show(struct pci_slot *pci_slot, char *buf) -{ - int retval = -ENOENT; - struct slot *slot = to_slot(pci_slot->hotplug); - - if (!slot) - return retval; - - retval = sprintf(buf, "%s\n", slot->physical_path); - return retval; -} - -static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path); - -static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) -{ - struct pcibus_info *pcibus_info; - u16 busnum, segment, ioboard_type; - - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); - - /* Check to see if this is a valid slot on 'pci_bus' */ - if (!(pcibus_info->pbi_valid_devices & (1 << device))) - return -EPERM; - - ioboard_type = sn_ioboard_to_pci_bus(pci_bus); - busnum = pcibus_info->pbi_buscommon.bs_persist_busnum; - segment = pci_domain_nr(pci_bus) & 0xf; - - /* Do not allow hotplug operations on base I/O cards */ - if ((ioboard_type == L1_BRICKTYPE_IX || - ioboard_type == L1_BRICKTYPE_IA) && - (segment == 1 && busnum == 0 && device != 1)) - return -EPERM; - - return 1; -} - -static int sn_pci_bus_valid(struct pci_bus *pci_bus) -{ - struct pcibus_info *pcibus_info; - u32 asic_type; - u16 ioboard_type; - - /* Don't register slots hanging off the TIOCA bus */ - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); - asic_type = pcibus_info->pbi_buscommon.bs_asic_type; - if (asic_type == PCIIO_ASIC_TYPE_TIOCA) - return -EPERM; - - /* Only register slots in I/O Bricks that support hotplug */ - ioboard_type = sn_ioboard_to_pci_bus(pci_bus); - switch (ioboard_type) { - case L1_BRICKTYPE_IX: - case L1_BRICKTYPE_PX: - case L1_BRICKTYPE_IA: - case L1_BRICKTYPE_PA: - case L1_BOARDTYPE_PCIX3SLOT: - return 1; - break; - default: - return -EPERM; - break; - } - - return -EIO; -} - -static int sn_hp_slot_private_alloc(struct hotplug_slot **bss_hotplug_slot, - struct pci_bus *pci_bus, int device, - char *name) -{ - struct pcibus_info *pcibus_info; - struct slot *slot; - - pcibus_info = 
SN_PCIBUS_BUSSOFT_INFO(pci_bus); - - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) - return -ENOMEM; - - slot->device_num = device; - slot->pci_bus = pci_bus; - sprintf(name, "%04x:%02x:%02x", - pci_domain_nr(pci_bus), - ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum), - device + 1); - - sn_generate_path(pci_bus, slot->physical_path); - - list_add(&slot->hp_list, &sn_hp_list); - *bss_hotplug_slot = &slot->hotplug_slot; - - return 0; -} - -static struct hotplug_slot *sn_hp_destroy(void) -{ - struct slot *slot; - struct pci_slot *pci_slot; - struct hotplug_slot *bss_hotplug_slot = NULL; - - list_for_each_entry(slot, &sn_hp_list, hp_list) { - bss_hotplug_slot = &slot->hotplug_slot; - pci_slot = bss_hotplug_slot->pci_slot; - list_del(&slot->hp_list); - sysfs_remove_file(&pci_slot->kobj, - &sn_slot_path_attr.attr); - break; - } - return bss_hotplug_slot; -} - -static void sn_bus_free_data(struct pci_dev *dev) -{ - struct pci_bus *subordinate_bus; - struct pci_dev *child; - - /* Recursively clean up sn_irq_info structs */ - if (dev->subordinate) { - subordinate_bus = dev->subordinate; - list_for_each_entry(child, &subordinate_bus->devices, bus_list) - sn_bus_free_data(child); - } - /* - * Some drivers may use dma accesses during the - * driver remove function. We release the sysdata - * areas after the driver remove functions have - * been called. - */ - sn_bus_store_sysdata(dev); - sn_pci_unfixup_slot(dev); -} - -static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot, - int device_num, char **ssdt) -{ - struct slot *slot = to_slot(bss_hotplug_slot); - struct pcibus_info *pcibus_info; - struct pcibr_slot_enable_resp resp; - int rc; - - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - - /* - * Power-on and initialize the slot in the SN - * PCI infrastructure. 
- */ - rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt); - - - if (rc == PCI_SLOT_ALREADY_UP) { - pci_dbg(slot->pci_bus->self, "is already active\n"); - return 1; /* return 1 to user */ - } - - if (rc == PCI_L1_ERR) { - pci_dbg(slot->pci_bus->self, "L1 failure %d with message: %s", - resp.resp_sub_errno, resp.resp_l1_msg); - return -EPERM; - } - - if (rc) { - pci_dbg(slot->pci_bus->self, "insert failed with error %d sub-error %d\n", - rc, resp.resp_sub_errno); - return -EIO; - } - - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - pcibus_info->pbi_enabled_devices |= (1 << device_num); - - return 0; -} - -static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot, - int device_num, int action) -{ - struct slot *slot = to_slot(bss_hotplug_slot); - struct pcibus_info *pcibus_info; - struct pcibr_slot_disable_resp resp; - int rc; - - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - - rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp); - - if ((action == PCI_REQ_SLOT_ELIGIBLE) && - (rc == PCI_SLOT_ALREADY_DOWN)) { - pci_dbg(slot->pci_bus->self, "Slot %s already inactive\n", slot->physical_path); - return 1; /* return 1 to user */ - } - - if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) { - pci_dbg(slot->pci_bus->self, "Cannot remove last 33MHz card\n"); - return -EPERM; - } - - if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) { - pci_dbg(slot->pci_bus->self, "L1 failure %d with message \n%s\n", - resp.resp_sub_errno, resp.resp_l1_msg); - return -EPERM; - } - - if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) { - pci_dbg(slot->pci_bus->self, "remove failed with error %d sub-error %d\n", - rc, resp.resp_sub_errno); - return -EIO; - } - - if ((action == PCI_REQ_SLOT_ELIGIBLE) && !rc) - return 0; - - if ((action == PCI_REQ_SLOT_DISABLE) && !rc) { - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - pcibus_info->pbi_enabled_devices &= ~(1 << device_num); - pci_dbg(slot->pci_bus->self, "remove successful\n"); - return 0; - } - - if ((action == PCI_REQ_SLOT_DISABLE) && rc) { - pci_dbg(slot->pci_bus->self, "remove failed rc = %d\n", rc); - } - - return rc; -} - -/* - * Power up and configure the slot via a SAL call to PROM. - * Scan slot (and any children), do any platform specific fixup, - * and find device driver. - */ -static int enable_slot(struct hotplug_slot *bss_hotplug_slot) -{ - struct slot *slot = to_slot(bss_hotplug_slot); - struct pci_bus *new_bus = NULL; - struct pci_dev *dev; - int num_funcs; - int new_ppb = 0; - int rc; - char *ssdt = NULL; - void pcibios_fixup_device_resources(struct pci_dev *); - - /* Serialize the Linux PCI infrastructure */ - mutex_lock(&sn_hotplug_mutex); - - /* - * Power-on and initialize the slot in the SN - * PCI infrastructure. Also, retrieve the ACPI SSDT - * table for the slot (if ACPI capable PROM). 
- */ - rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt); - if (rc) { - mutex_unlock(&sn_hotplug_mutex); - return rc; - } - - if (ssdt) - ssdt = __va(ssdt); - /* Add the new SSDT for the slot to the ACPI namespace */ - if (SN_ACPI_BASE_SUPPORT() && ssdt) { - acpi_status ret; - - ret = acpi_load_table((struct acpi_table_header *)ssdt); - if (ACPI_FAILURE(ret)) { - printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n", - __func__, ret); - /* try to continue on */ - } - } - - num_funcs = pci_scan_slot(slot->pci_bus, - PCI_DEVFN(slot->device_num + 1, 0)); - if (!num_funcs) { - pci_dbg(slot->pci_bus->self, "no device in slot\n"); - mutex_unlock(&sn_hotplug_mutex); - return -ENODEV; - } - - /* - * Map SN resources for all functions on the card - * to the Linux PCI interface and tell the drivers - * about them. - */ - list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) { - if (PCI_SLOT(dev->devfn) != slot->device_num + 1) - continue; - - /* Need to do slot fixup on PPB before fixup of children - * (PPB's pcidev_info needs to be in pcidev_info list - * before child's SN_PCIDEV_INFO() call to setup - * pdi_host_pcidev_info). - */ - pcibios_fixup_device_resources(dev); - if (SN_ACPI_BASE_SUPPORT()) - sn_acpi_slot_fixup(dev); - else - sn_io_slot_fixup(dev); - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - pci_hp_add_bridge(dev); - if (dev->subordinate) { - new_bus = dev->subordinate; - new_ppb = 1; - } - } - } - - /* - * Add the slot's devices to the ACPI infrastructure */ - if (SN_ACPI_BASE_SUPPORT() && ssdt) { - unsigned long long adr; - struct acpi_device *pdevice; - acpi_handle phandle; - acpi_handle chandle = NULL; - acpi_handle rethandle; - acpi_status ret; - - phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); - - if (acpi_bus_get_device(phandle, &pdevice)) { - pci_dbg(slot->pci_bus->self, "no parent device, assuming NULL\n"); - pdevice = NULL; - } - - acpi_scan_lock_acquire(); - /* - * Walk the rootbus node's immediate children looking for - * the slot's device node(s). There can be more than - * one for multifunction devices. - */ - for (;;) { - rethandle = NULL; - ret = acpi_get_next_object(ACPI_TYPE_DEVICE, - phandle, chandle, - &rethandle); - - if (ret == AE_NOT_FOUND || rethandle == NULL) - break; - - chandle = rethandle; - - ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR, - NULL, &adr); - - if (ACPI_SUCCESS(ret) && - (adr>>16) == (slot->device_num + 1)) { - - ret = acpi_bus_scan(chandle); - if (ACPI_FAILURE(ret)) { - printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n", - __func__, ret, (int)(adr>>16), - (int)(adr&0xffff)); - /* try to continue on */ - } - } - } - acpi_scan_lock_release(); - } - - pci_lock_rescan_remove(); - - /* Call the driver for the new device */ - pci_bus_add_devices(slot->pci_bus); - /* Call the drivers for the new devices subordinate to PPB */ - if (new_ppb) - pci_bus_add_devices(new_bus); - - pci_unlock_rescan_remove(); - mutex_unlock(&sn_hotplug_mutex); - - if (rc == 0) - pci_dbg(slot->pci_bus->self, "insert operation successful\n"); - else - pci_dbg(slot->pci_bus->self, "insert operation failed rc = %d\n", rc); - - return rc; -} - -static int disable_slot(struct hotplug_slot *bss_hotplug_slot) -{ - struct slot *slot = to_slot(bss_hotplug_slot); - struct pci_dev *dev, *temp; - int rc; - acpi_handle ssdt_hdl = NULL; - - /* Acquire update access to the bus */ - mutex_lock(&sn_hotplug_mutex); - - /* is it okay to bring this slot down? 
*/ - rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, - PCI_REQ_SLOT_ELIGIBLE); - if (rc) - goto leaving; - - /* free the ACPI resources for the slot */ - if (SN_ACPI_BASE_SUPPORT() && - PCI_CONTROLLER(slot->pci_bus)->companion) { - unsigned long long adr; - struct acpi_device *device; - acpi_handle phandle; - acpi_handle chandle = NULL; - acpi_handle rethandle; - acpi_status ret; - - /* Get the rootbus node pointer */ - phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); - - acpi_scan_lock_acquire(); - /* - * Walk the rootbus node's immediate children looking for - * the slot's device node(s). There can be more than - * one for multifunction devices. - */ - for (;;) { - rethandle = NULL; - ret = acpi_get_next_object(ACPI_TYPE_DEVICE, - phandle, chandle, - &rethandle); - - if (ret == AE_NOT_FOUND || rethandle == NULL) - break; - - chandle = rethandle; - - ret = acpi_evaluate_integer(chandle, - METHOD_NAME__ADR, - NULL, &adr); - if (ACPI_SUCCESS(ret) && - (adr>>16) == (slot->device_num + 1)) { - /* retain the owner id */ - ssdt_hdl = chandle; - - ret = acpi_bus_get_device(chandle, - &device); - if (ACPI_SUCCESS(ret)) - acpi_bus_trim(device); - } - } - acpi_scan_lock_release(); - } - - pci_lock_rescan_remove(); - /* Free the SN resources assigned to the Linux device.*/ - list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) { - if (PCI_SLOT(dev->devfn) != slot->device_num + 1) - continue; - - pci_dev_get(dev); - sn_bus_free_data(dev); - pci_stop_and_remove_bus_device(dev); - pci_dev_put(dev); - } - pci_unlock_rescan_remove(); - - /* Remove the SSDT for the slot from the ACPI namespace */ - if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) { - acpi_status ret; - ret = acpi_unload_parent_table(ssdt_hdl); - if (ACPI_FAILURE(ret)) { - acpi_handle_err(ssdt_hdl, - "%s: acpi_unload_parent_table failed (0x%x)\n", - __func__, ret); - /* try to continue on */ - } - } - - /* free the collected sysdata pointers */ - sn_bus_free_sysdata(); - - /* Deactivate slot */ - rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, - PCI_REQ_SLOT_DISABLE); - leaving: - /* Release the bus lock */ - mutex_unlock(&sn_hotplug_mutex); - - return rc; -} - -static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot, - u8 *value) -{ - struct slot *slot = to_slot(bss_hotplug_slot); - struct pcibus_info *pcibus_info; - u32 power; - - pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - mutex_lock(&sn_hotplug_mutex); - power = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); - *value = power ? 1 : 0; - mutex_unlock(&sn_hotplug_mutex); - return 0; -} - -static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) -{ - kfree(to_slot(bss_hotplug_slot)); -} - -static int sn_hotplug_slot_register(struct pci_bus *pci_bus) -{ - int device; - struct pci_slot *pci_slot; - struct hotplug_slot *bss_hotplug_slot; - char name[SN_SLOT_NAME_SIZE]; - int rc = 0; - - /* - * Currently only four devices are supported, - * in the future there maybe more -- up to 32. 
- */ - - for (device = 0; device < SN_MAX_HP_SLOTS ; device++) { - if (sn_pci_slot_valid(pci_bus, device) != 1) - continue; - - if (sn_hp_slot_private_alloc(&bss_hotplug_slot, - pci_bus, device, name)) { - rc = -ENOMEM; - goto alloc_err; - } - bss_hotplug_slot->ops = &sn_hotplug_slot_ops; - - rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name); - if (rc) - goto register_err; - - pci_slot = bss_hotplug_slot->pci_slot; - rc = sysfs_create_file(&pci_slot->kobj, - &sn_slot_path_attr.attr); - if (rc) - goto alloc_err; - } - pci_dbg(pci_bus->self, "Registered bus with hotplug\n"); - return rc; - -register_err: - pci_dbg(pci_bus->self, "bus failed to register with err = %d\n", - rc); - - /* destroy THIS element */ - sn_hp_destroy(); - sn_release_slot(bss_hotplug_slot); - -alloc_err: - /* destroy anything else on the list */ - while ((bss_hotplug_slot = sn_hp_destroy())) { - pci_hp_deregister(bss_hotplug_slot); - sn_release_slot(bss_hotplug_slot); - } - - return rc; -} - -static int __init sn_pci_hotplug_init(void) -{ - struct pci_bus *pci_bus = NULL; - int rc; - int registered = 0; - - if (!sn_prom_feature_available(PRF_HOTPLUG_SUPPORT)) { - printk(KERN_ERR "%s: PROM version does not support hotplug.\n", - __func__); - return -EPERM; - } - - INIT_LIST_HEAD(&sn_hp_list); - - while ((pci_bus = pci_find_next_bus(pci_bus))) { - if (!pci_bus->sysdata) - continue; - - rc = sn_pci_bus_valid(pci_bus); - if (rc != 1) { - pci_dbg(pci_bus->self, "not a valid hotplug bus\n"); - continue; - } - pci_dbg(pci_bus->self, "valid hotplug bus\n"); - - rc = sn_hotplug_slot_register(pci_bus); - if (!rc) { - registered = 1; - } else { - registered = 0; - break; - } - } - - return registered == 1 ? 0 : -ENODEV; -} - -static void __exit sn_pci_hotplug_exit(void) -{ - struct hotplug_slot *bss_hotplug_slot; - - while ((bss_hotplug_slot = sn_hp_destroy())) { - pci_hp_deregister(bss_hotplug_slot); - sn_release_slot(bss_hotplug_slot); - } - - if (!list_empty(&sn_hp_list)) - printk(KERN_ERR "%s: internal list is not empty\n", __FILE__); -} - -module_init(sn_pci_hotplug_init); -module_exit(sn_pci_hotplug_exit); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 3aa115ed3a65..b3f972e8cfed 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -132,8 +132,6 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn) &physfn->sriov->subsystem_vendor); pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID, &physfn->sriov->subsystem_device); - - physfn->sriov->cfg_size = pci_cfg_space_size(virtfn); } int pci_iov_add_virtfn(struct pci_dev *dev, int id) @@ -242,6 +240,173 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id) pci_dev_put(dev); } +static ssize_t sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev)); +} + +static ssize_t sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->num_VFs); +} + +/* + * num_vfs > 0; number of VFs to enable + * num_vfs = 0; disable all VFs + * + * Note: SRIOV spec does not allow partial VF + * disable, so it's all or none. 
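The sriov_numvfs store below hands the request to the PF driver's sriov_configure() callback, which returns the number of VFs actually enabled or a negative errno. A hedged sketch of the driver side, using hypothetical "example" names:

	static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
	{
		if (num_vfs == 0) {	/* all-or-none: 0 disables every VF */
			pci_disable_sriov(pdev);
			return 0;
		}
		return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
	}

	static struct pci_driver example_pf_driver = {
		.name		 = "example_pf",
		.sriov_configure = example_sriov_configure,
		/* .id_table, .probe, .remove elided */
	};

Userspace then drives it with, e.g., "echo 4 > /sys/bus/pci/devices/DDDD:BB:DD.F/sriov_numvfs".
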
+ */ +static ssize_t sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + u16 num_vfs; + + ret = kstrtou16(buf, 0, &num_vfs); + if (ret < 0) + return ret; + + if (num_vfs > pci_sriov_get_totalvfs(pdev)) + return -ERANGE; + + device_lock(&pdev->dev); + + if (num_vfs == pdev->sriov->num_VFs) + goto exit; + + /* is PF driver loaded w/callback */ + if (!pdev->driver || !pdev->driver->sriov_configure) { + pci_info(pdev, "Driver does not support SRIOV configuration via sysfs\n"); + ret = -ENOENT; + goto exit; + } + + if (num_vfs == 0) { + /* disable VFs */ + ret = pdev->driver->sriov_configure(pdev, 0); + goto exit; + } + + /* enable VFs */ + if (pdev->sriov->num_VFs) { + pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n", + pdev->sriov->num_VFs, num_vfs); + ret = -EBUSY; + goto exit; + } + + ret = pdev->driver->sriov_configure(pdev, num_vfs); + if (ret < 0) + goto exit; + + if (ret != num_vfs) + pci_warn(pdev, "%d VFs requested; only %d enabled\n", + num_vfs, ret); + +exit: + device_unlock(&pdev->dev); + + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sriov_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->offset); +} + +static ssize_t sriov_stride_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->stride); +} + +static ssize_t sriov_vf_device_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%x\n", pdev->sriov->vf_device); +} + +static ssize_t sriov_drivers_autoprobe_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); +} + +static ssize_t sriov_drivers_autoprobe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool drivers_autoprobe; + + if (kstrtobool(buf, &drivers_autoprobe) < 0) + return -EINVAL; + + pdev->sriov->drivers_autoprobe = drivers_autoprobe; + + return count; +} + +static DEVICE_ATTR_RO(sriov_totalvfs); +static DEVICE_ATTR_RW(sriov_numvfs); +static DEVICE_ATTR_RO(sriov_offset); +static DEVICE_ATTR_RO(sriov_stride); +static DEVICE_ATTR_RO(sriov_vf_device); +static DEVICE_ATTR_RW(sriov_drivers_autoprobe); + +static struct attribute *sriov_dev_attrs[] = { + &dev_attr_sriov_totalvfs.attr, + &dev_attr_sriov_numvfs.attr, + &dev_attr_sriov_offset.attr, + &dev_attr_sriov_stride.attr, + &dev_attr_sriov_vf_device.attr, + &dev_attr_sriov_drivers_autoprobe.attr, + NULL, +}; + +static umode_t sriov_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + + if (!dev_is_pf(dev)) + return 0; + + return a->mode; +} + +const struct attribute_group sriov_dev_attr_group = { + .attrs = sriov_dev_attrs, + .is_visible = sriov_attrs_are_visible, +}; + int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { return 0; @@ -559,8 +724,8 @@ static void sriov_restore_state(struct pci_dev *dev) ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI; pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); - for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) - 
pci_update_resource(dev, i); + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) + pci_update_resource(dev, i + PCI_IOV_RESOURCES); pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); pci_iov_set_numvfs(dev, iov->num_VFs); diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c index 24505b08de40..b8c9011987f4 100644 --- a/drivers/pci/mmap.c +++ b/drivers/pci/mmap.c @@ -73,7 +73,7 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar, #elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */ /* - * Legacy setup: Impement pci_mmap_resource_range() as a wrapper around + * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around * the architecture's pci_mmap_page_range(), converting to "user visible" * addresses as necessary. */ diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 73986825d221..0884bedcfc7a 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -192,6 +192,9 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) { + if (desc->msi_attrib.is_virtual) + return NULL; + return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; } @@ -206,14 +209,19 @@ static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) { u32 mask_bits = desc->masked; + void __iomem *desc_addr; if (pci_msi_ignore_mask) return 0; + desc_addr = pci_msix_desc_addr(desc); + if (!desc_addr) + return 0; mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; if (flag) mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; - writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL); + + writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); return mask_bits; } @@ -237,7 +245,7 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag) } /** - * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts + * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_mask_irq(struct irq_data *data) @@ -247,7 +255,7 @@ void pci_msi_mask_irq(struct irq_data *data) EXPORT_SYMBOL_GPL(pci_msi_mask_irq); /** - * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts + * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_unmask_irq(struct irq_data *data) @@ -273,6 +281,11 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) if (entry->msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); + if (!base) { + WARN_ON(1); + return; + } + msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); msg->data = readl(base + PCI_MSIX_ENTRY_DATA); @@ -303,6 +316,9 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) } else if (entry->msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); + if (!base) + goto skip; + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->data, base + PCI_MSIX_ENTRY_DATA); @@ -327,7 +343,13 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) msg->data); } } + +skip: entry->msg = *msg; + + if (entry->write_msi_msg) + entry->write_msi_msg(entry, entry->write_msi_msg_data); + } void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) @@ -550,6 +572,7 @@ 
msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) entry->msi_attrib.is_msix = 0; entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); + entry->msi_attrib.is_virtual = 0; entry->msi_attrib.entry_nr = 0; entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ @@ -588,11 +611,11 @@ static int msi_verify_entries(struct pci_dev *dev) * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function * @nvec: number of interrupts to allocate - * @affd: description of automatic irq affinity assignments (may be %NULL) + * @affd: description of automatic IRQ affinity assignments (may be %NULL) * * Setup the MSI capability structure of the device with the requested * number of interrupts. A return value of zero indicates the successful - * setup of an entry with the new MSI irq. A negative return value indicates + * setup of an entry with the new MSI IRQ. A negative return value indicates * an error, and a positive return value indicates the number of interrupts * which could have been allocated. */ @@ -609,7 +632,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, if (!entry) return -ENOMEM; - /* All MSIs are unmasked by default, Mask them all */ + /* All MSIs are unmasked by default; mask them all */ mask = msi_mask(entry->msi_attrib.multi_cap); msi_mask_irq(entry, mask, mask); @@ -637,7 +660,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, return ret; } - /* Set MSI enabled bits */ + /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); dev->msi_enabled = 1; @@ -674,6 +697,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct irq_affinity_desc *curmsk, *masks = NULL; struct msi_desc *entry; int ret, i; + int vec_count = pci_msix_vec_count(dev); if (affd) masks = irq_create_affinity_masks(nvec, affd); @@ -696,6 +720,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.entry_nr = entries[i].entry; else entry->msi_attrib.entry_nr = i; + + entry->msi_attrib.is_virtual = + entry->msi_attrib.entry_nr >= vec_count; + entry->msi_attrib.default_irq = dev->irq; entry->mask_base = base; @@ -714,12 +742,19 @@ static void msix_program_entries(struct pci_dev *dev, { struct msi_desc *entry; int i = 0; + void __iomem *desc_addr; for_each_pci_msi_entry(entry, dev) { if (entries) entries[i++].vector = entry->irq; - entry->masked = readl(pci_msix_desc_addr(entry) + - PCI_MSIX_ENTRY_VECTOR_CTRL); + + desc_addr = pci_msix_desc_addr(entry); + if (desc_addr) + entry->masked = readl(desc_addr + + PCI_MSIX_ENTRY_VECTOR_CTRL); + else + entry->masked = 0; + msix_mask_irq(entry, 1); } } @@ -729,11 +764,11 @@ static void msix_program_entries(struct pci_dev *dev, * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of struct msix_entry entries * @nvec: number of @entries - * @affd: Optional pointer to enable automatic affinity assignement + * @affd: Optional pointer to enable automatic affinity assignment * * Setup the MSI-X capability structure of device function with a - * single MSI-X irq. A return of zero indicates the successful setup of - * requested MSI-X entries with allocated irqs or non-zero for otherwise. + * single MSI-X IRQ. A return of zero indicates the successful setup of + * requested MSI-X entries with allocated IRQs or non-zero for otherwise. 
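The is_virtual plumbing above lets a caller allocate more MSI-X vectors than the device's table actually holds; entries past pci_msix_vec_count() get no table slot, so their mask/message accessors return NULL and are skipped. Drivers opt in with the new PCI_IRQ_VIRTUAL flag, roughly:

	/* Sketch: accept up to 128 vectors even on a smaller MSI-X table;
	 * the excess become virtual (never programmed into hardware). */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 128,
					 PCI_IRQ_MSIX | PCI_IRQ_VIRTUAL);
	if (nvec < 0)
		return nvec;
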
**/ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) @@ -789,7 +824,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, out_avail: if (ret < 0) { /* - * If we had some success, report the number of irqs + * If we had some success, report the number of IRQs * we succeeded in setting up. */ struct msi_desc *entry; @@ -812,7 +847,7 @@ out_free: /** * pci_msi_supported - check whether MSI may be enabled on a device * @dev: pointer to the pci_dev data structure of MSI device function - * @nvec: how many MSIs have been requested ? + * @nvec: how many MSIs have been requested? * * Look at global flags, the device itself, and its parent buses * to determine if MSI/-X are supported for the device. If MSI/-X is @@ -896,7 +931,7 @@ static void pci_msi_shutdown(struct pci_dev *dev) /* Keep cached state to be restored */ __pci_msi_desc_mask_irq(desc, mask, ~mask); - /* Restore dev->irq to its default pin-assertion irq */ + /* Restore dev->irq to its default pin-assertion IRQ */ dev->irq = desc->msi_attrib.default_irq; pcibios_alloc_irq(dev); } @@ -932,7 +967,7 @@ int pci_msix_vec_count(struct pci_dev *dev) EXPORT_SYMBOL(pci_msix_vec_count); static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, - int nvec, struct irq_affinity *affd) + int nvec, struct irq_affinity *affd, int flags) { int nr_entries; int i, j; @@ -943,7 +978,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, nr_entries = pci_msix_vec_count(dev); if (nr_entries < 0) return nr_entries; - if (nvec > nr_entries) + if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL)) return nr_entries; if (entries) { @@ -958,7 +993,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, } } - /* Check whether driver already requested for MSI irq */ + /* Check whether driver already requested for MSI IRQ */ if (dev->msi_enabled) { pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); return -EINVAL; @@ -1026,7 +1061,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (!pci_msi_supported(dev, minvec)) return -EINVAL; - /* Check whether driver already requested MSI-X irqs */ + /* Check whether driver already requested MSI-X IRQs */ if (dev->msix_enabled) { pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); return -EINVAL; @@ -1079,7 +1114,8 @@ EXPORT_SYMBOL(pci_enable_msi); static int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, - int maxvec, struct irq_affinity *affd) + int maxvec, struct irq_affinity *affd, + int flags) { int rc, nvec = maxvec; @@ -1096,7 +1132,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, return -ENOSPC; } - rc = __pci_enable_msix(dev, entries, nvec, affd); + rc = __pci_enable_msix(dev, entries, nvec, affd, flags); if (rc == 0) return nvec; @@ -1113,8 +1149,8 @@ static int __pci_enable_msix_range(struct pci_dev *dev, * pci_enable_msix_range - configure device's MSI-X capability structure * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of MSI-X entries - * @minvec: minimum number of MSI-X irqs requested - * @maxvec: maximum number of MSI-X irqs requested + * @minvec: minimum number of MSI-X IRQs requested + * @maxvec: maximum number of MSI-X IRQs requested * * Setup the MSI-X capability structure of device function with a maximum * possible number of interrupts in the range between @minvec and 
@maxvec @@ -1127,7 +1163,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { - return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL); + return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0); } EXPORT_SYMBOL(pci_enable_msix_range); @@ -1167,7 +1203,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, if (flags & PCI_IRQ_MSIX) { msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, - max_vecs, affd); + max_vecs, affd, flags); if (msix_vecs > 0) return msix_vecs; } @@ -1179,7 +1215,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, return msi_vecs; } - /* use legacy irq if allowed */ + /* use legacy IRQ if allowed */ if (flags & PCI_IRQ_LEGACY) { if (min_vecs == 1 && dev->irq) { /* @@ -1248,7 +1284,7 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) EXPORT_SYMBOL(pci_irq_vector); /** - * pci_irq_get_affinity - return the affinity of a particular msi vector + * pci_irq_get_affinity - return the affinity of a particular MSI vector * @dev: PCI device to operate on * @nr: device-relative interrupt vector index (0-based). */ @@ -1280,7 +1316,7 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) EXPORT_SYMBOL(pci_irq_get_affinity); /** - * pci_irq_get_node - return the numa node of a particular msi vector + * pci_irq_get_node - return the NUMA node of a particular MSI vector * @pdev: PCI device to operate on * @vec: device-relative interrupt vector index (0-based). */ @@ -1330,7 +1366,7 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) /** * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source * @dev: Pointer to the PCI device - * @desc: Pointer to the msi descriptor + * @desc: Pointer to the MSI descriptor * * The ID number is only used within the irqdomain. 
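The flags plumbing above is what carries PCI_IRQ_VIRTUAL down to __pci_enable_msix(), letting a driver allocate more vectors than the device's MSI-X table advertises; the surplus descriptors are marked is_virtual and are never programmed into hardware. A minimal sketch of such a caller, where the driver name, handler, and vector count are purely illustrative:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Ask for up to 64 vectors even if the MSI-X table is smaller; with
 * PCI_IRQ_VIRTUAL the excess entries become software-only vectors. */
static int foo_init_irqs(struct pci_dev *pdev)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 64,
					 PCI_IRQ_MSIX | PCI_IRQ_VIRTUAL);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() maps the 0-based index to a Linux IRQ number */
	return request_irq(pci_irq_vector(pdev, 0), foo_handler, 0,
			   "foo", pdev);
}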
*/ @@ -1338,7 +1374,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc) { return (irq_hw_number_t)desc->msi_attrib.entry_nr | - PCI_DEVID(dev->bus->number, dev->devfn) << 11 | + pci_dev_id(dev) << 11 | (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; } @@ -1348,7 +1384,8 @@ static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) } /** - * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev + * pci_msi_domain_check_cap - Verify that @domain supports the capabilities + * for @dev * @domain: The interrupt domain to check * @info: The domain info for verification * @dev: The device to check @@ -1508,7 +1545,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) { struct device_node *of_node; - u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); + u32 rid = pci_dev_id(pdev); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); @@ -1531,7 +1568,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { struct irq_domain *dom; - u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); + u32 rid = pci_dev_id(pdev); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); dom = of_msi_map_get_device_domain(&pdev->dev, rid); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 3d32da15c215..36891e7deee3 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -15,32 +15,47 @@ #include <linux/of_pci.h> #include "pci.h" +#ifdef CONFIG_PCI void pci_set_of_node(struct pci_dev *dev) { if (!dev->bus->dev.of_node) return; dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn); + if (dev->dev.of_node) + dev->dev.fwnode = &dev->dev.of_node->fwnode; } void pci_release_of_node(struct pci_dev *dev) { of_node_put(dev->dev.of_node); dev->dev.of_node = NULL; + dev->dev.fwnode = NULL; } void pci_set_bus_of_node(struct pci_bus *bus) { - if (bus->self == NULL) - bus->dev.of_node = pcibios_get_phb_of_node(bus); - else - bus->dev.of_node = of_node_get(bus->self->dev.of_node); + struct device_node *node; + + if (bus->self == NULL) { + node = pcibios_get_phb_of_node(bus); + } else { + node = of_node_get(bus->self->dev.of_node); + if (node && of_property_read_bool(node, "external-facing")) + bus->self->untrusted = true; + } + + bus->dev.of_node = node; + + if (bus->dev.of_node) + bus->dev.fwnode = &bus->dev.of_node->fwnode; } void pci_release_bus_of_node(struct pci_bus *bus) { of_node_put(bus->dev.of_node); bus->dev.of_node = NULL; + bus->dev.fwnode = NULL; } struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) @@ -197,27 +212,6 @@ int of_get_pci_domain_nr(struct device_node *node) EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); /** - * This function will try to find the limitation of link speed by finding - * a property called "max-link-speed" of the given device node. - * - * @node: device tree node with the max link speed information - * - * Returns the associated max link speed from DT, or a negative value if the - * required property is not found or is invalid. 
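of_pci_get_max_link_speed() itself is only being moved here: it is re-added verbatim below the new #endif so it remains available when CONFIG_PCI is off. A hedged sketch of a typical controller-driver use of the helper; the function name and the Gen4 fallback are invented for illustration:

#include <linux/of.h>
#include <linux/of_pci.h>

/* Hypothetical host-controller helper honouring a DT link-speed cap */
static int foo_pcie_link_gen(struct device_node *np)
{
	int gen = of_pci_get_max_link_speed(np);

	/* Property absent or invalid: fall back to the maximum (Gen4) */
	return gen < 0 ? 4 : gen;
}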
- */ -int of_pci_get_max_link_speed(struct device_node *node) -{ - u32 max_link_speed; - - if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || - max_link_speed > 4) - return -EINVAL; - - return max_link_speed; -} -EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); - -/** * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only * is present and valid */ @@ -359,7 +353,7 @@ EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources); /** * of_irq_parse_pci - Resolve the interrupt for a PCI device * @pdev: the device whose interrupt is to be resolved - * @out_irq: structure of_irq filled by this function + * @out_irq: structure of_phandle_args filled by this function * * This function resolves the PCI interrupt for a given PCI device. If a * device-node exists for a given pci_dev, it will use normal OF tree @@ -537,3 +531,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev, return err; } +#endif /* CONFIG_PCI */ + +/** + * This function will try to find the limitation of link speed by finding + * a property called "max-link-speed" of the given device node. + * + * @node: device tree node with the max link speed information + * + * Returns the associated max link speed from DT, or a negative value if the + * required property is not found or is invalid. + */ +int of_pci_get_max_link_speed(struct device_node *node) +{ + u32 max_link_speed; + + if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || + max_link_speed > 4) + return -EINVAL; + + return max_link_speed; +} +EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index c52298d76e64..0608aae72ccc 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -18,14 +18,32 @@ #include <linux/percpu-refcount.h> #include <linux/random.h> #include <linux/seq_buf.h> +#include <linux/xarray.h> + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED, + PCI_P2PDMA_MAP_BUS_ADDR, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, +}; struct pci_p2pdma { - struct percpu_ref devmap_ref; - struct completion devmap_ref_done; struct gen_pool *pool; bool p2pmem_published; + struct xarray map_types; +}; + +struct pci_p2pdma_pagemap { + struct dev_pagemap pgmap; + struct pci_dev *provider; + u64 bus_offset; }; +static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap) +{ + return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -74,41 +92,21 @@ static const struct attribute_group p2pmem_group = { .name = "p2pmem", }; -static void pci_p2pdma_percpu_release(struct percpu_ref *ref) -{ - struct pci_p2pdma *p2p = - container_of(ref, struct pci_p2pdma, devmap_ref); - - complete_all(&p2p->devmap_ref_done); -} - -static void pci_p2pdma_percpu_kill(struct percpu_ref *ref) -{ - /* - * pci_p2pdma_add_resource() may be called multiple times - * by a driver and may register the percpu_kill devm action multiple - * times. We only want the first action to actually kill the - * percpu_ref. 
- */ - if (percpu_ref_is_dying(ref)) - return; - - percpu_ref_kill(ref); -} - static void pci_p2pdma_release(void *data) { struct pci_dev *pdev = data; + struct pci_p2pdma *p2pdma = pdev->p2pdma; - if (!pdev->p2pdma) + if (!p2pdma) return; - wait_for_completion(&pdev->p2pdma->devmap_ref_done); - percpu_ref_exit(&pdev->p2pdma->devmap_ref); + /* Flush and disable pci_alloc_p2pmem() */ + pdev->p2pdma = NULL; + synchronize_rcu(); - gen_pool_destroy(pdev->p2pdma->pool); + gen_pool_destroy(p2pdma->pool); sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group); - pdev->p2pdma = NULL; + xa_destroy(&p2pdma->map_types); } static int pci_p2pdma_setup(struct pci_dev *pdev) @@ -120,16 +118,12 @@ static int pci_p2pdma_setup(struct pci_dev *pdev) if (!p2p) return -ENOMEM; + xa_init(&p2p->map_types); + p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev)); if (!p2p->pool) goto out; - init_completion(&p2p->devmap_ref_done); - error = percpu_ref_init(&p2p->devmap_ref, - pci_p2pdma_percpu_release, 0, GFP_KERNEL); - if (error) - goto out_pool_destroy; - error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev); if (error) goto out_pool_destroy; @@ -163,6 +157,7 @@ out: int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { + struct pci_p2pdma_pagemap *p2p_pgmap; struct dev_pagemap *pgmap; void *addr; int error; @@ -185,18 +180,19 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, return error; } - pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL); - if (!pgmap) + p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL); + if (!p2p_pgmap) return -ENOMEM; + pgmap = &p2p_pgmap->pgmap; pgmap->res.start = pci_resource_start(pdev, bar) + offset; pgmap->res.end = pgmap->res.start + size - 1; pgmap->res.flags = pci_resource_flags(pdev, bar); - pgmap->ref = &pdev->p2pdma->devmap_ref; pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; - pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - + + p2p_pgmap->provider = pdev; + p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar); - pgmap->kill = pci_p2pdma_percpu_kill; addr = devm_memremap_pages(&pdev->dev, pgmap); if (IS_ERR(addr)) { @@ -204,17 +200,20 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, goto pgmap_free; } - error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr, + error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr, pci_bus_address(pdev, bar) + offset, - resource_size(&pgmap->res), dev_to_node(&pdev->dev)); + resource_size(&pgmap->res), dev_to_node(&pdev->dev), + pgmap->ref); if (error) - goto pgmap_free; + goto pages_free; pci_info(pdev, "added peer-to-peer DMA memory %pR\n", &pgmap->res); return 0; +pages_free: + devm_memunmap_pages(&pdev->dev, pgmap); pgmap_free: devm_kfree(&pdev->dev, pgmap); return error; @@ -223,7 +222,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource); /* * Note this function returns the parent PCI device with a - * reference taken. It is the caller's responsibily to drop + * reference taken. It is the caller's responsibility to drop * the reference.
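For context, pci_p2pdma_add_resource() is the provider-side entry point reworked above. A sketch of how a driver might publish memory through it, assuming a hypothetical device whose BAR 4 backs the pool:

#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

/* Hypothetical provider publishing half of BAR 4 as p2p DMA memory */
static int foo_publish_p2pmem(struct pci_dev *pdev)
{
	int rc = pci_p2pdma_add_resource(pdev, 4,
					 pci_resource_len(pdev, 4) / 2, 0);
	if (rc)
		return rc;

	pci_p2pmem_publish(pdev, true);
	return 0;
}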
*/ static struct pci_dev *find_parent_pci_dev(struct device *dev) @@ -274,58 +273,82 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) seq_buf_printf(buf, "%s;", pci_name(pdev)); } +static const struct pci_p2pdma_whitelist_entry { + unsigned short vendor; + unsigned short device; + enum { + REQ_SAME_HOST_BRIDGE = 1 << 0, + } flags; +} pci_p2pdma_whitelist[] = { + /* AMD ZEN */ + {PCI_VENDOR_ID_AMD, 0x1450, 0}, + + /* Intel Xeon E5/Core i7 */ + {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE}, + /* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */ + {PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE}, + {} +}; + +static bool __host_bridge_whitelist(struct pci_host_bridge *host, + bool same_host_bridge) +{ + struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0)); + const struct pci_p2pdma_whitelist_entry *entry; + unsigned short vendor, device; + + if (!root) + return false; + + vendor = root->vendor; + device = root->device; + pci_dev_put(root); + + for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) { + if (vendor != entry->vendor || device != entry->device) + continue; + if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge) + return false; + + return true; + } + + return false; +} + /* - * Find the distance through the nearest common upstream bridge between - * two PCI devices. - * - * If the two devices are the same device then 0 will be returned. - * - * If there are two virtual functions of the same device behind the same - * bridge port then 2 will be returned (one step down to the PCIe switch, - * then one step back to the same device). - * - * In the case where two devices are connected to the same PCIe switch, the - * value 4 will be returned. This corresponds to the following PCI tree: - * - * -+ Root Port - * \+ Switch Upstream Port - * +-+ Switch Downstream Port - * + \- Device A - * \-+ Switch Downstream Port - * \- Device B - * - * The distance is 4 because we traverse from Device A through the downstream - * port of the switch, to the common upstream port, back up to the second - * downstream port and then to Device B. - * - * Any two devices that don't have a common upstream bridge will return -1. - * In this way devices on separate PCIe root ports will be rejected, which - * is what we want for peer-to-peer seeing each PCIe root port defines a - * separate hierarchy domain and there's no way to determine whether the root - * complex supports forwarding between them. - * - * In the case where two devices are connected to different PCIe switches, - * this function will still return a positive distance as long as both - * switches eventually have a common upstream bridge. Note this covers - * the case of using multiple PCIe switches to achieve a desired level of - * fan-out from a root port. The exact distance will be a function of the - * number of switches between Device A and Device B. - * - * If a bridge which has any ACS redirection bits set is in the path - * then this functions will return -2. This is so we reject any - * cases where the TLPs are forwarded up into the root complex. - * In this case, a list of all infringing bridge addresses will be - * populated in acs_list (assuming it's non-null) for printk purposes. + * If we can't find a common upstream bridge take a look at the root + * complex and compare it to a whitelist of known good hardware. 
*/ -static int upstream_bridge_distance(struct pci_dev *a, - struct pci_dev *b, - struct seq_buf *acs_list) +static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b) { + struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus); + struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus); + + if (host_a == host_b) + return __host_bridge_whitelist(host_a, true); + + if (__host_bridge_whitelist(host_a, false) && + __host_bridge_whitelist(host_b, false)) + return true; + + return false; +} + +static enum pci_p2pdma_map_type +__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, + int *dist, bool *acs_redirects, struct seq_buf *acs_list) +{ + struct pci_dev *a = provider, *b = client, *bb; int dist_a = 0; int dist_b = 0; - struct pci_dev *bb = NULL; int acs_cnt = 0; + if (acs_redirects) + *acs_redirects = false; + /* * Note, we don't need to take references to devices returned by * pci_upstream_bridge() seeing we hold a reference to a child @@ -354,7 +377,10 @@ static int upstream_bridge_distance(struct pci_dev *a, dist_a++; } - return -1; + if (dist) + *dist = dist_a + dist_b; + + return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; check_b_path_acs: bb = b; @@ -371,33 +397,110 @@ check_b_path_acs: bb = pci_upstream_bridge(bb); } - if (acs_cnt) - return -2; + if (dist) + *dist = dist_a + dist_b; + + if (acs_cnt) { + if (acs_redirects) + *acs_redirects = true; + + return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; + } + + return PCI_P2PDMA_MAP_BUS_ADDR; +} + +static unsigned long map_types_idx(struct pci_dev *client) +{ + return (pci_domain_nr(client->bus) << 16) | + (client->bus->number << 8) | client->devfn; +} + +/* + * Find the distance through the nearest common upstream bridge between + * two PCI devices. + * + * If the two devices are the same device then 0 will be returned. + * + * If there are two virtual functions of the same device behind the same + * bridge port then 2 will be returned (one step down to the PCIe switch, + * then one step back to the same device). + * + * In the case where two devices are connected to the same PCIe switch, the + * value 4 will be returned. This corresponds to the following PCI tree: + * + * -+ Root Port + * \+ Switch Upstream Port + * +-+ Switch Downstream Port + * + \- Device A + * \-+ Switch Downstream Port + * \- Device B + * + * The distance is 4 because we traverse from Device A through the downstream + * port of the switch, to the common upstream port, back up to the second + * downstream port and then to Device B. + * + * Any two devices that cannot communicate using p2pdma will return + * PCI_P2PDMA_MAP_NOT_SUPPORTED. + * + * Any two devices that have a data path that goes through the host bridge + * will consult a whitelist. If the host bridges are on the whitelist, + * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE. + * + * If either bridge is not on the whitelist this function returns + * PCI_P2PDMA_MAP_NOT_SUPPORTED. + * + * If a bridge which has any ACS redirection bits set is in the path, + * acs_redirects will be set to true. In this case, a list of all infringing + * bridge addresses will be populated in acs_list (assuming it's non-null) + * for printk purposes. 
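The xarray added earlier caches one computed map type per client, keyed by map_types_idx(). The index is just the client's domain, bus, and devfn packed into a single value; a small illustration with an invented address:

#include <linux/pci.h>

/* Cache index for a client at 0001:02:00.3, matching map_types_idx() */
static unsigned long foo_example_idx(void)
{
	return (0x0001UL << 16) | (0x02 << 8) | PCI_DEVFN(0x00, 3);
}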
+ */ +static enum pci_p2pdma_map_type +upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, + int *dist, bool *acs_redirects, struct seq_buf *acs_list) +{ + enum pci_p2pdma_map_type map_type; + + map_type = __upstream_bridge_distance(provider, client, dist, + acs_redirects, acs_list); + + if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) { + if (!host_bridge_whitelist(provider, client)) + map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED; + } + + if (provider->p2pdma) + xa_store(&provider->p2pdma->map_types, map_types_idx(client), + xa_mk_value(map_type), GFP_KERNEL); - return dist_a + dist_b; + return map_type; } -static int upstream_bridge_distance_warn(struct pci_dev *provider, - struct pci_dev *client) +static enum pci_p2pdma_map_type +upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client, + int *dist) { struct seq_buf acs_list; + bool acs_redirects; int ret; seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); if (!acs_list.buffer) return -ENOMEM; - ret = upstream_bridge_distance(provider, client, &acs_list); - if (ret == -2) { - pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n", + ret = upstream_bridge_distance(provider, client, dist, &acs_redirects, + &acs_list); + if (acs_redirects) { + pci_warn(client, "ACS redirect is set between the client and provider (%s)\n", pci_name(provider)); /* Drop final semicolon */ acs_list.buffer[acs_list.len-1] = 0; pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n", acs_list.buffer); + } - } else if (ret < 0) { - pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n", + if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) { + pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n", pci_name(provider)); } @@ -407,35 +510,43 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider, } /** - * pci_p2pdma_distance_many - Determive the cumulative distance between + * pci_p2pdma_distance_many - Determine the cumulative distance between * a p2pdma provider and the clients in use. * @provider: p2pdma provider to check against the client list * @clients: array of devices to check (NULL-terminated) * @num_clients: number of clients in the array * @verbose: if true, print warnings for devices when we return -1 * - * Returns -1 if any of the clients are not compatible (behind the same - * root port as the provider), otherwise returns a positive number where - * a lower number is the preferable choice. (If there's one client - * that's the same as the provider it will return 0, which is best choice). + * Returns -1 if any of the clients are not compatible, otherwise returns a + * positive number where a lower number is the preferable choice. (If there's + * one client that's the same as the provider it will return 0, which is the + * best choice). * - * For now, "compatible" means the provider and the clients are all behind - * the same PCI root port. This cuts out cases that may work but is safest - * for the user. Future work can expand this to white-list root complexes that - * can safely forward between each ports. + * "compatible" means the provider and the clients are either all behind + * the same PCI root port or the host bridges connected to each of the devices + * are listed in the 'pci_p2pdma_whitelist'.
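A hedged sketch of how orchestration code might use the distance API documented above to vet a provider; the helper name and the two-client setup are hypothetical:

#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

/* Hypothetical check that a provider can serve two client devices */
static bool foo_provider_usable(struct pci_dev *provider,
				struct device *a, struct device *b)
{
	struct device *clients[] = { a, b };

	/* Negative result: incompatible, fall back to host memory */
	return pci_p2pdma_distance_many(provider, clients, 2, true) >= 0;
}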
*/ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, int num_clients, bool verbose) { bool not_supported = false; struct pci_dev *pci_client; - int distance = 0; + int total_dist = 0; + int distance; int i, ret; if (num_clients == 0) return -1; for (i = 0; i < num_clients; i++) { + if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) && + clients[i]->dma_ops == &dma_virt_ops) { + if (verbose) + dev_warn(clients[i], + "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n"); + return -1; + } + pci_client = find_parent_pci_dev(clients[i]); if (!pci_client) { if (verbose) @@ -446,26 +557,26 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, if (verbose) ret = upstream_bridge_distance_warn(provider, - pci_client); + pci_client, &distance); else ret = upstream_bridge_distance(provider, pci_client, - NULL); + &distance, NULL, NULL); pci_dev_put(pci_client); - if (ret < 0) + if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) not_supported = true; if (not_supported && !verbose) break; - distance += ret; + total_dist += distance; } if (not_supported) return -1; - return distance; + return total_dist; } EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many); @@ -553,19 +664,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many); */ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) { - void *ret; + void *ret = NULL; + struct percpu_ref *ref; + /* + * Pairs with synchronize_rcu() in pci_p2pdma_release() to + * ensure pdev->p2pdma is non-NULL for the duration of the + * read-lock. + */ + rcu_read_lock(); if (unlikely(!pdev->p2pdma)) - return NULL; - - if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref))) - return NULL; - - ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size); + goto out; - if (unlikely(!ret)) - percpu_ref_put(&pdev->p2pdma->devmap_ref); + ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size, + (void **) &ref); + if (!ret) + goto out; + if (unlikely(!percpu_ref_tryget_live(ref))) { + gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size); + ret = NULL; + goto out; + } +out: + rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); @@ -578,8 +700,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); */ void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size) { - gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size); - percpu_ref_put(&pdev->p2pdma->devmap_ref); + struct percpu_ref *ref; + + gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size, + (void **) &ref); + percpu_ref_put(ref); } EXPORT_SYMBOL_GPL(pci_free_p2pmem); @@ -677,21 +802,19 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) } EXPORT_SYMBOL_GPL(pci_p2pmem_publish); -/** - * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA - * @dev: device doing the DMA request - * @sg: scatter list to map - * @nents: elements in the scatterlist - * @dir: DMA direction - * - * Scatterlists mapped with this function should not be unmapped in any way. - * - * Returns the number of SG entries mapped or 0 on error. 
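On the consumer side, the rework above means an allocation either pins the underlying pagemap's percpu ref or fails cleanly once teardown has begun. A minimal sketch, with the buffer size and usage purely illustrative:

#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

/* Allocate, use and return a 4 KiB chunk of peer-to-peer memory */
static void foo_use_p2pmem(struct pci_dev *provider)
{
	void *buf = pci_alloc_p2pmem(provider, 4096);

	if (!buf)
		return;		/* provider unbound or pool exhausted */

	/* ... hand pci_p2pmem_virt_to_bus(provider, buf) to a peer ... */

	pci_free_p2pmem(provider, buf, 4096);
}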
- */ -int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) +static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider, + struct pci_dev *client) +{ + if (!provider->p2pdma) + return PCI_P2PDMA_MAP_NOT_SUPPORTED; + + return xa_to_value(xa_load(&provider->p2pdma->map_types, + map_types_idx(client))); +} + +static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap, + struct device *dev, struct scatterlist *sg, int nents) { - struct dev_pagemap *pgmap; struct scatterlist *s; phys_addr_t paddr; int i; @@ -700,23 +823,87 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, * p2pdma mappings are not compatible with devices that use * dma_virt_ops. If the upper layers do the right thing * this should never happen because it will be prevented - * by the check in pci_p2pdma_add_client() + * by the check in pci_p2pdma_distance_many() */ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) && dev->dma_ops == &dma_virt_ops)) return 0; for_each_sg(sg, s, nents, i) { - pgmap = sg_page(s)->pgmap; paddr = sg_phys(s); - s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset; + s->dma_address = paddr - p2p_pgmap->bus_offset; sg_dma_len(s) = s->length; } return nents; } -EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg); + +/** + * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA + * @dev: device doing the DMA request + * @sg: scatter list to map + * @nents: elements in the scatterlist + * @dir: DMA direction + * @attrs: DMA attributes passed to dma_map_sg() (if called) + * + * Scatterlists mapped with this function should be unmapped using + * pci_p2pdma_unmap_sg_attrs(). + * + * Returns the number of SG entries mapped or 0 on error. + */ +int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_p2pdma_pagemap *p2p_pgmap = + to_p2p_pgmap(sg_page(sg)->pgmap); + struct pci_dev *client; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return 0; + + client = to_pci_dev(dev); + + switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) { + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + return dma_map_sg_attrs(dev, sg, nents, dir, attrs); + case PCI_P2PDMA_MAP_BUS_ADDR: + return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents); + default: + WARN_ON_ONCE(1); + return 0; + } +} +EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs); + +/** + * pci_p2pdma_unmap_sg - unmap a PCI peer-to-peer scatterlist that was + * mapped with pci_p2pdma_map_sg() + * @dev: device doing the DMA request + * @sg: scatter list to unmap + * @nents: number of elements returned by pci_p2pdma_map_sg() + * @dir: DMA direction + * @attrs: DMA attributes passed to dma_unmap_sg() (if called) + */ +void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_p2pdma_pagemap *p2p_pgmap = + to_p2p_pgmap(sg_page(sg)->pgmap); + enum pci_p2pdma_map_type map_type; + struct pci_dev *client; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return; + + client = to_pci_dev(dev); + + map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client); + + if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) + dma_unmap_sg_attrs(dev, sg, nents, dir, attrs); +} +EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs); /** * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index e1949f7efd9c..0c02d500158f 100644 --- a/drivers/pci/pci-acpi.c +++
b/drivers/pci/pci-acpi.c @@ -14,7 +14,6 @@ #include <linux/msi.h> #include <linux/pci_hotplug.h> #include <linux/module.h> -#include <linux/pci-aspm.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> @@ -118,8 +117,58 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) return (phys_addr_t)mcfg_addr; } +/* _HPX PCI Setting Record (Type 0); same as _HPP */ +struct hpx_type0 { + u32 revision; /* Not present in _HPP */ + u8 cache_line_size; /* Not applicable to PCIe */ + u8 latency_timer; /* Not applicable to PCIe */ + u8 enable_serr; + u8 enable_perr; +}; + +static struct hpx_type0 pci_default_type0 = { + .revision = 1, + .cache_line_size = 8, + .latency_timer = 0x40, + .enable_serr = 0, + .enable_perr = 0, +}; + +static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx) +{ + u16 pci_cmd, pci_bctl; + + if (!hpx) + hpx = &pci_default_type0; + + if (hpx->revision > 1) { + pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", + hpx->revision); + hpx = &pci_default_type0; + } + + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer); + pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); + if (hpx->enable_serr) + pci_cmd |= PCI_COMMAND_SERR; + if (hpx->enable_perr) + pci_cmd |= PCI_COMMAND_PARITY; + pci_write_config_word(dev, PCI_COMMAND, pci_cmd); + + /* Program bridge control value */ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, + hpx->latency_timer); + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); + if (hpx->enable_perr) + pci_bctl |= PCI_BRIDGE_CTL_PARITY; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); + } +} + static acpi_status decode_type0_hpx_record(union acpi_object *record, - struct hotplug_params *hpx) + struct hpx_type0 *hpx0) { int i; union acpi_object *fields = record->package.elements; @@ -132,24 +181,44 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record, for (i = 2; i < 6; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; - hpx->t0 = &hpx->type0_data; - hpx->t0->revision = revision; - hpx->t0->cache_line_size = fields[2].integer.value; - hpx->t0->latency_timer = fields[3].integer.value; - hpx->t0->enable_serr = fields[4].integer.value; - hpx->t0->enable_perr = fields[5].integer.value; + hpx0->revision = revision; + hpx0->cache_line_size = fields[2].integer.value; + hpx0->latency_timer = fields[3].integer.value; + hpx0->enable_serr = fields[4].integer.value; + hpx0->enable_perr = fields[5].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 0 Revision %d record not supported\n", + pr_warn("%s: Type 0 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } +/* _HPX PCI-X Setting Record (Type 1) */ +struct hpx_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx) +{ + int pos; + + if (!hpx) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!pos) + return; + + pci_warn(dev, "PCI-X settings not supported\n"); +} + static acpi_status decode_type1_hpx_record(union acpi_object *record, - struct hotplug_params *hpx) + struct hpx_type1 *hpx1) { int i; union acpi_object *fields = record->package.elements; @@ -162,23 +231,143 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record, for (i = 2; i < 5; i++) if (fields[i].type != 
ACPI_TYPE_INTEGER) return AE_ERROR; - hpx->t1 = &hpx->type1_data; - hpx->t1->revision = revision; - hpx->t1->max_mem_read = fields[2].integer.value; - hpx->t1->avg_max_split = fields[3].integer.value; - hpx->t1->tot_max_split = fields[4].integer.value; + hpx1->revision = revision; + hpx1->max_mem_read = fields[2].integer.value; + hpx1->avg_max_split = fields[3].integer.value; + hpx1->tot_max_split = fields[4].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 1 Revision %d record not supported\n", + pr_warn("%s: Type 1 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } +static bool pcie_root_rcb_set(struct pci_dev *dev) +{ + struct pci_dev *rp = pcie_find_root_port(dev); + u16 lnkctl; + + if (!rp) + return false; + + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_RCB) + return true; + + return false; +} + +/* _HPX PCI Express Setting Record (Type 2) */ +struct hpx_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx) +{ + int pos; + u32 reg32; + + if (!hpx) + return; + + if (!pci_is_pcie(dev)) + return; + + if (hpx->revision > 1) { + pci_warn(dev, "PCIe settings rev %d not supported\n", + hpx->revision); + return; + } + + /* + * Don't allow _HPX to change MPS or MRRS settings. We manage + * those to make sure they're consistent with the rest of the + * platform. + */ + hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ; + hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ); + + /* Initialize Device Control Register */ + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or); + + /* Initialize Link Control Register */ + if (pcie_cap_has_lnkctl(dev)) { + + /* + * If the Root Port supports Read Completion Boundary of + * 128, set RCB to 128. Otherwise, clear it. 
+ */ + hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; + hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; + if (pcie_root_rcb_set(dev)) + hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; + + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, + ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or); + } + + /* Find Advanced Error Reporting Enhanced Capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + /* Initialize Uncorrectable Error Mask Register */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); + reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); + + /* Initialize Uncorrectable Error Severity Register */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); + reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); + + /* Initialize Correctable Error Mask Register */ + pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); + reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or; + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); + + /* Initialize Advanced Error Capabilities and Control Register */ + pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); + reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or; + + /* Don't enable ECRC generation or checking if unsupported */ + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; + pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); + + /* + * FIXME: The following two registers are not supported yet. + * + * o Secondary Uncorrectable Error Severity Register + * o Secondary Uncorrectable Error Mask Register + */ +} + static acpi_status decode_type2_hpx_record(union acpi_object *record, - struct hotplug_params *hpx) + struct hpx_type2 *hpx2) { int i; union acpi_object *fields = record->package.elements; @@ -191,45 +380,258 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record, for (i = 2; i < 18; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; - hpx->t2 = &hpx->type2_data; - hpx->t2->revision = revision; - hpx->t2->unc_err_mask_and = fields[2].integer.value; - hpx->t2->unc_err_mask_or = fields[3].integer.value; - hpx->t2->unc_err_sever_and = fields[4].integer.value; - hpx->t2->unc_err_sever_or = fields[5].integer.value; - hpx->t2->cor_err_mask_and = fields[6].integer.value; - hpx->t2->cor_err_mask_or = fields[7].integer.value; - hpx->t2->adv_err_cap_and = fields[8].integer.value; - hpx->t2->adv_err_cap_or = fields[9].integer.value; - hpx->t2->pci_exp_devctl_and = fields[10].integer.value; - hpx->t2->pci_exp_devctl_or = fields[11].integer.value; - hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; - hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; - hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; - hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; - hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; - hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; + hpx2->revision = revision; + hpx2->unc_err_mask_and = fields[2].integer.value; + hpx2->unc_err_mask_or = fields[3].integer.value; + hpx2->unc_err_sever_and = fields[4].integer.value; + hpx2->unc_err_sever_or = fields[5].integer.value; + hpx2->cor_err_mask_and = fields[6].integer.value; + hpx2->cor_err_mask_or = fields[7].integer.value; + hpx2->adv_err_cap_and = fields[8].integer.value; + 
hpx2->adv_err_cap_or = fields[9].integer.value; + hpx2->pci_exp_devctl_and = fields[10].integer.value; + hpx2->pci_exp_devctl_or = fields[11].integer.value; + hpx2->pci_exp_lnkctl_and = fields[12].integer.value; + hpx2->pci_exp_lnkctl_or = fields[13].integer.value; + hpx2->sec_unc_err_sever_and = fields[14].integer.value; + hpx2->sec_unc_err_sever_or = fields[15].integer.value; + hpx2->sec_unc_err_mask_and = fields[16].integer.value; + hpx2->sec_unc_err_mask_or = fields[17].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 2 Revision %d record not supported\n", + pr_warn("%s: Type 2 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } -static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) +/* _HPX PCI Express Setting Record (Type 3) */ +struct hpx_type3 { + u16 device_type; + u16 function_type; + u16 config_space_location; + u16 pci_exp_cap_id; + u16 pci_exp_cap_ver; + u16 pci_exp_vendor_id; + u16 dvsec_id; + u16 dvsec_rev; + u16 match_offset; + u32 match_mask_and; + u32 match_value; + u16 reg_offset; + u32 reg_mask_and; + u32 reg_mask_or; +}; + +enum hpx_type3_dev_type { + HPX_TYPE_ENDPOINT = BIT(0), + HPX_TYPE_LEG_END = BIT(1), + HPX_TYPE_RC_END = BIT(2), + HPX_TYPE_RC_EC = BIT(3), + HPX_TYPE_ROOT_PORT = BIT(4), + HPX_TYPE_UPSTREAM = BIT(5), + HPX_TYPE_DOWNSTREAM = BIT(6), + HPX_TYPE_PCI_BRIDGE = BIT(7), + HPX_TYPE_PCIE_BRIDGE = BIT(8), +}; + +static u16 hpx3_device_type(struct pci_dev *dev) +{ + u16 pcie_type = pci_pcie_type(dev); + const int pcie_to_hpx3_type[] = { + [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, + [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, + [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, + [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, + [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, + [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, + [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, + [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, + [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, + }; + + if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) + return 0; + + return pcie_to_hpx3_type[pcie_type]; +} + +enum hpx_type3_fn_type { + HPX_FN_NORMAL = BIT(0), + HPX_FN_SRIOV_PHYS = BIT(1), + HPX_FN_SRIOV_VIRT = BIT(2), +}; + +static u8 hpx3_function_type(struct pci_dev *dev) +{ + if (dev->is_virtfn) + return HPX_FN_SRIOV_VIRT; + else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) + return HPX_FN_SRIOV_PHYS; + else + return HPX_FN_NORMAL; +} + +static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) +{ + u8 cap_ver = hpx3_cap_id & 0xf; + + if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) + return true; + else if (cap_ver == pcie_cap_id) + return true; + + return false; +} + +enum hpx_type3_cfg_loc { + HPX_CFG_PCICFG = 0, + HPX_CFG_PCIE_CAP = 1, + HPX_CFG_PCIE_CAP_EXT = 2, + HPX_CFG_VEND_CAP = 3, + HPX_CFG_DVSEC = 4, + HPX_CFG_MAX, +}; + +static void program_hpx_type3_register(struct pci_dev *dev, + const struct hpx_type3 *reg) +{ + u32 match_reg, write_reg, header, orig_value; + u16 pos; + + if (!(hpx3_device_type(dev) & reg->device_type)) + return; + + if (!(hpx3_function_type(dev) & reg->function_type)) + return; + + switch (reg->config_space_location) { + case HPX_CFG_PCICFG: + pos = 0; + break; + case HPX_CFG_PCIE_CAP: + pos = pci_find_capability(dev, reg->pci_exp_cap_id); + if (pos == 0) + return; + + break; + case HPX_CFG_PCIE_CAP_EXT: + pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); + if (pos == 0) + return; + + pci_read_config_dword(dev, pos, &header); + if 
(!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), + reg->pci_exp_cap_ver)) + return; + + break; + case HPX_CFG_VEND_CAP: /* Fall through */ + case HPX_CFG_DVSEC: /* Fall through */ + default: + pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n"); + return; + } + + pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); + + if ((match_reg & reg->match_mask_and) != reg->match_value) + return; + + pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); + orig_value = write_reg; + write_reg &= reg->reg_mask_and; + write_reg |= reg->reg_mask_or; + + if (orig_value == write_reg) + return; + + pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); + + pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n", + pos, orig_value, write_reg); +} + +static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx) +{ + if (!hpx) + return; + + if (!pci_is_pcie(dev)) + return; + + program_hpx_type3_register(dev, hpx); +} + +static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, + union acpi_object *reg_fields) +{ + hpx3_reg->device_type = reg_fields[0].integer.value; + hpx3_reg->function_type = reg_fields[1].integer.value; + hpx3_reg->config_space_location = reg_fields[2].integer.value; + hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value; + hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value; + hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value; + hpx3_reg->dvsec_id = reg_fields[6].integer.value; + hpx3_reg->dvsec_rev = reg_fields[7].integer.value; + hpx3_reg->match_offset = reg_fields[8].integer.value; + hpx3_reg->match_mask_and = reg_fields[9].integer.value; + hpx3_reg->match_value = reg_fields[10].integer.value; + hpx3_reg->reg_offset = reg_fields[11].integer.value; + hpx3_reg->reg_mask_and = reg_fields[12].integer.value; + hpx3_reg->reg_mask_or = reg_fields[13].integer.value; +} + +static acpi_status program_type3_hpx_record(struct pci_dev *dev, + union acpi_object *record) +{ + union acpi_object *fields = record->package.elements; + u32 desc_count, expected_length, revision; + union acpi_object *reg_fields; + struct hpx_type3 hpx3; + int i; + + revision = fields[1].integer.value; + switch (revision) { + case 1: + desc_count = fields[2].integer.value; + expected_length = 3 + desc_count * 14; + + if (record->package.count != expected_length) + return AE_ERROR; + + for (i = 2; i < expected_length; i++) + if (fields[i].type != ACPI_TYPE_INTEGER) + return AE_ERROR; + + for (i = 0; i < desc_count; i++) { + reg_fields = fields + 3 + i * 14; + parse_hpx3_register(&hpx3, reg_fields); + program_hpx_type3(dev, &hpx3); + } + + break; + default: + pr_warn("%s: Type 3 Revision %d record not supported\n", + __func__, revision); + return AE_ERROR; + } + return AE_OK; +} + +static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package, *record, *fields; + struct hpx_type0 hpx0; + struct hpx_type1 hpx1; + struct hpx_type2 hpx2; u32 type; int i; - /* Clear the return buffer with zeros */ - memset(hpx, 0, sizeof(struct hotplug_params)); - status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); if (ACPI_FAILURE(status)) return status; @@ -257,22 +659,33 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) type = fields[0].integer.value; switch (type) { case 0: - status = decode_type0_hpx_record(record, hpx); + memset(&hpx0, 0, sizeof(hpx0)); + status =
decode_type0_hpx_record(record, &hpx0); if (ACPI_FAILURE(status)) goto exit; + program_hpx_type0(dev, &hpx0); break; case 1: - status = decode_type1_hpx_record(record, hpx); + memset(&hpx1, 0, sizeof(hpx1)); + status = decode_type1_hpx_record(record, &hpx1); if (ACPI_FAILURE(status)) goto exit; + program_hpx_type1(dev, &hpx1); break; case 2: - status = decode_type2_hpx_record(record, hpx); + memset(&hpx2, 0, sizeof(hpx2)); + status = decode_type2_hpx_record(record, &hpx2); + if (ACPI_FAILURE(status)) + goto exit; + program_hpx_type2(dev, &hpx2); + break; + case 3: + status = program_type3_hpx_record(dev, record); if (ACPI_FAILURE(status)) goto exit; break; default: - printk(KERN_ERR "%s: Type %d record not supported\n", + pr_err("%s: Type %d record not supported\n", __func__, type); status = AE_ERROR; goto exit; @@ -283,14 +696,15 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) return status; } -static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) +static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package, *fields; + struct hpx_type0 hpx0; int i; - memset(hpp, 0, sizeof(struct hotplug_params)); + memset(&hpx0, 0, sizeof(hpx0)); status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); if (ACPI_FAILURE(status)) @@ -311,24 +725,24 @@ static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) } } - hpp->t0 = &hpp->type0_data; - hpp->t0->revision = 1; - hpp->t0->cache_line_size = fields[0].integer.value; - hpp->t0->latency_timer = fields[1].integer.value; - hpp->t0->enable_serr = fields[2].integer.value; - hpp->t0->enable_perr = fields[3].integer.value; + hpx0.revision = 1; + hpx0.cache_line_size = fields[0].integer.value; + hpx0.latency_timer = fields[1].integer.value; + hpx0.enable_serr = fields[2].integer.value; + hpx0.enable_perr = fields[3].integer.value; + + program_hpx_type0(dev, &hpx0); exit: kfree(buffer.pointer); return status; } -/* pci_get_hp_params +/* pci_acpi_program_hp_params * * @dev - the pci_dev for which we want parameters - * @hpp - allocated by the caller */ -int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) +int pci_acpi_program_hp_params(struct pci_dev *dev) { acpi_status status; acpi_handle handle, phandle; @@ -351,10 +765,10 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) * this pci dev. 
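With this rework, the old fetch-then-program pair collapses into pci_acpi_program_hp_params(), which walks _HPX/_HPP and writes the device directly; the function is declared in the PCI core's private header. A sketch of the new call shape, with a hypothetical caller:

#include <linux/pci.h>
#include "pci.h"	/* declares pci_acpi_program_hp_params() */

/* Hypothetical call site applying firmware _HPX/_HPP settings */
static void foo_configure_new_device(struct pci_dev *dev)
{
	if (pci_acpi_program_hp_params(dev))
		pci_dbg(dev, "no ACPI hotplug parameters applied\n");
}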
*/ while (handle) { - status = acpi_run_hpx(handle, hpp); + status = acpi_run_hpx(dev, handle); if (ACPI_SUCCESS(status)) return 0; - status = acpi_run_hpp(handle, hpp); + status = acpi_run_hpp(dev, handle); if (ACPI_SUCCESS(status)) return 0; if (acpi_is_root_bridge(handle)) @@ -366,7 +780,6 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) } return -ENODEV; } -EXPORT_SYMBOL_GPL(pci_get_hp_params); /** * pciehp_is_native - Check whether a hotplug port is handled by the OS @@ -618,12 +1031,21 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) if (!adev || !acpi_device_power_manageable(adev)) return PCI_UNKNOWN; - if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN) + state = adev->power.state; + if (state == ACPI_STATE_UNKNOWN) return PCI_UNKNOWN; return state_conv[state]; } +static void acpi_pci_refresh_power_state(struct pci_dev *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(&dev->dev); + + if (adev && acpi_device_power_manageable(adev)) + acpi_device_update_power(adev, NULL); +} + static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) { while (bus->parent) { @@ -666,7 +1088,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) if (!adev || !acpi_device_power_manageable(adev)) return false; - if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) + if (adev->wakeup.flags.valid && + device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) return true; if (acpi_target_system_state() == ACPI_STATE_S0) @@ -680,6 +1103,7 @@ static const struct pci_platform_pm_ops acpi_pci_platform_pm = { .is_manageable = acpi_pci_power_manageable, .set_state = acpi_pci_set_power_state, .get_state = acpi_pci_get_power_state, + .refresh_state = acpi_pci_refresh_power_state, .choose_state = acpi_pci_choose_state, .set_wakeup = acpi_pci_wakeup, .need_resume = acpi_pci_need_resume, @@ -833,6 +1257,7 @@ static void pci_acpi_setup(struct device *dev) device_wakeup_enable(dev); acpi_pci_wakeup(pci_dev, false); + acpi_device_power_add_dependent(adev, dev); } static void pci_acpi_cleanup(struct device *dev) @@ -845,6 +1270,7 @@ static void pci_acpi_cleanup(struct device *dev) pci_acpi_remove_pm_notifier(adev); if (adev->wakeup.flags.valid) { + acpi_device_power_remove_dependent(adev, dev); if (pci_dev->bridge_d3) device_wakeup_disable(dev); diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 83fb077d0b41..5fd90105510d 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior { u32 rsvd; }; -const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { +static const struct pci_bridge_reg_behavior pci_regs_behavior[] = { [PCI_VENDOR_ID / 4] = { .ro = ~0 }, [PCI_COMMAND / 4] = { .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | @@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { }, }; -const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { +static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and @@ -305,7 +305,7 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, } /* - * Cleanup a pci_bridge_emul structure that was previously initilized + * Cleanup a pci_bridge_emul structure that was previously initialized * using pci_bridge_emul_init(). 
*/ void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 71853befd435..a8124e47bf6e 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV static inline bool pci_device_can_probe(struct pci_dev *pdev) { - return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe); + return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe || + pdev->driver_override); } #else static inline bool pci_device_can_probe(struct pci_dev *pdev) @@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = to_pci_driver(dev->driver); + if (!pci_device_can_probe(pci_dev)) + return -ENODEV; + pci_assign_irq(pci_dev); error = pcibios_alloc_irq(pci_dev); @@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev) return error; pci_dev_get(pci_dev); - if (pci_device_can_probe(pci_dev)) { - error = __pci_device_probe(drv, pci_dev); - if (error) { - pcibios_free_irq(pci_dev); - pci_dev_put(pci_dev); - } + error = __pci_device_probe(drv, pci_dev); + if (error) { + pcibios_free_irq(pci_dev); + pci_dev_put(pci_dev); } return error; @@ -524,7 +526,6 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) pci_power_up(pci_dev); pci_restore_state(pci_dev); pci_pme_restore(pci_dev); - pci_fixup_device(pci_fixup_resume_early, pci_dev); } /* @@ -578,7 +579,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, - "PCI PM: Device state not saved by %pF\n", + "PCI PM: Device state not saved by %pS\n", drv->suspend); } } @@ -605,7 +606,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, - "PCI PM: Device state not saved by %pF\n", + "PCI PM: Device state not saved by %pS\n", drv->suspend_late); goto Fixup; } @@ -679,6 +680,7 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) static int pci_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; + struct pci_dev *pci_dev = to_pci_dev(dev); if (drv && drv->pm && drv->pm->prepare) { int error = drv->pm->prepare(dev); @@ -688,7 +690,15 @@ static int pci_pm_prepare(struct device *dev) if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE)) return 0; } - return pci_dev_keep_suspended(to_pci_dev(dev)); + if (pci_dev_need_resume(pci_dev)) + return 0; + + /* + * The PME setting needs to be adjusted here in case the direct-complete + * optimization is used with respect to this device. + */ + pci_dev_adjust_pme(pci_dev); + return 1; } static void pci_pm_complete(struct device *dev) @@ -702,7 +712,14 @@ static void pci_pm_complete(struct device *dev) if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) { pci_power_t pre_sleep_state = pci_dev->current_state; - pci_update_current_state(pci_dev, pci_dev->current_state); + pci_refresh_power_state(pci_dev); + /* + * On platforms with ACPI this check may also trigger for + * devices sharing power resources if one of those power + * resources has been activated as a result of a change of the + * power state of another device sharing it. 
However, in that + * case it is also better to resume the device, in general. + */ if (pci_dev->current_state < pre_sleep_state) pm_request_resume(dev); } @@ -734,6 +751,8 @@ static int pci_pm_suspend(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + pci_dev->skip_bus_pm = false; + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_SUSPEND); @@ -756,9 +775,11 @@ static int pci_pm_suspend(struct device *dev) * better to resume the device from runtime suspend here. */ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || - !pci_dev_keep_suspended(pci_dev)) { + pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); pci_dev->state_saved = false; + } else { + pci_dev_adjust_pme(pci_dev); } if (pm->suspend) { @@ -773,7 +794,7 @@ static int pci_pm_suspend(struct device *dev) if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, - "PCI PM: State of device not saved by %pF\n", + "PCI PM: State of device not saved by %pS\n", pm->suspend); } } @@ -821,13 +842,24 @@ static int pci_pm_suspend_noirq(struct device *dev) if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, - "PCI PM: State of device not saved by %pF\n", + "PCI PM: State of device not saved by %pS\n", pm->suspend_noirq); goto Fixup; } } - if (!pci_dev->state_saved) { + if (pci_dev->skip_bus_pm) { + /* + * Either the device is a bridge with a child in D0 below it, or + * the function is running for the second time in a row without + * going through full resume, which is possible only during + * suspend-to-idle in a spurious wakeup case. The device should + * be in D0 at this point, but if it is a bridge, it may be + * necessary to save its state. + */ + if (!pci_dev->state_saved) + pci_save_state(pci_dev); + } else if (!pci_dev->state_saved) { pci_save_state(pci_dev); if (pci_power_manageable(pci_dev)) pci_prepare_to_sleep(pci_dev); @@ -836,6 +868,22 @@ static int pci_pm_suspend_noirq(struct device *dev) dev_dbg(dev, "PCI PM: Suspend power state: %s\n", pci_power_name(pci_dev->current_state)); + if (pci_dev->current_state == PCI_D0) { + pci_dev->skip_bus_pm = true; + /* + * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any + * downstream device is in D0, so avoid changing the power state + * of the parent bridge by setting the skip_bus_pm flag for it. + */ + if (pci_dev->bus->self) + pci_dev->bus->self->skip_bus_pm = true; + } + + if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) { + dev_dbg(dev, "PCI PM: Skipped\n"); + goto Fixup; + } + pci_pm_set_unknown_state(pci_dev); /* @@ -883,7 +931,16 @@ static int pci_pm_resume_noirq(struct device *dev) if (dev_pm_smart_suspend_and_suspended(dev)) pm_runtime_set_active(dev); - pci_pm_default_resume_early(pci_dev); + /* + * In the suspend-to-idle case, devices left in D0 during suspend will + * stay in D0, so it is not necessary to restore or update their + * configuration here and attempting to put them into D0 again is + * pointless, so avoid doing that. 
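The fast path added above reduces to one predicate on the skip_bus_pm field introduced by this series; restated as a standalone helper for clarity (illustrative only, not the actual kernel control flow):

#include <linux/pci.h>
#include <linux/suspend.h>

/* The noirq fast path applies when the device stayed in D0 and the
 * platform firmware plays no part in the suspend transition. */
static bool foo_can_skip_bus_pm(struct pci_dev *pci_dev)
{
	return pci_dev->skip_bus_pm && pm_suspend_no_platform();
}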
+ */ + if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform())) + pci_pm_default_resume_early(pci_dev); + + pci_fixup_device(pci_fixup_resume_early, pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -957,15 +1014,15 @@ static int pci_pm_freeze(struct device *dev) } /* - * This used to be done in pci_pm_prepare() for all devices and some - * drivers may depend on it, so do it here. Ideally, runtime-suspended - * devices should not be touched during freeze/thaw transitions, - * however. + * Resume all runtime-suspended devices before creating a snapshot + * image of system memory, because the restore kernel generally cannot + * be expected to always handle them consistently and they need to be + * put into the runtime-active metastate during system resume anyway, + * so it is better to ensure that the state saved in the image will be + * always consistent with that. */ - if (!dev_pm_smart_suspend_and_suspended(dev)) { - pm_runtime_resume(dev); - pci_dev->state_saved = false; - } + pm_runtime_resume(dev); + pci_dev->state_saved = false; if (pm->freeze) { int error; @@ -979,22 +1036,11 @@ static int pci_pm_freeze(struct device *dev) return 0; } -static int pci_pm_freeze_late(struct device *dev) -{ - if (dev_pm_smart_suspend_and_suspended(dev)) - return 0; - - return pm_generic_freeze_late(dev); -} - static int pci_pm_freeze_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; - if (dev_pm_smart_suspend_and_suspended(dev)) - return 0; - if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev, PMSG_FREEZE); @@ -1024,16 +1070,6 @@ static int pci_pm_thaw_noirq(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; - /* - * If the device is in runtime suspend, the code below may not work - * correctly with it, so skip that code and make the PM core skip all of - * the subsequent "thaw" callbacks for the device. - */ - if (dev_pm_smart_suspend_and_suspended(dev)) { - dev_pm_skip_next_resume_phases(dev); - return 0; - } - if (pcibios_pm_ops.thaw_noirq) { error = pcibios_pm_ops.thaw_noirq(dev); if (error) @@ -1093,10 +1129,13 @@ static int pci_pm_poweroff(struct device *dev) /* The reason to do that is the same as in pci_pm_suspend(). */ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || - !pci_dev_keep_suspended(pci_dev)) + pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); + pci_dev->state_saved = false; + } else { + pci_dev_adjust_pme(pci_dev); + } - pci_dev->state_saved = false; if (pm->poweroff) { int error; @@ -1168,10 +1207,6 @@ static int pci_pm_restore_noirq(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; - /* This is analogous to the pci_pm_resume_noirq() case. */ - if (dev_pm_smart_suspend_and_suspended(dev)) - pm_runtime_set_active(dev); - if (pcibios_pm_ops.restore_noirq) { error = pcibios_pm_ops.restore_noirq(dev); if (error) @@ -1179,6 +1214,7 @@ static int pci_pm_restore_noirq(struct device *dev) } pci_pm_default_resume_early(pci_dev); + pci_fixup_device(pci_fixup_resume_early, pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -1220,7 +1256,6 @@ static int pci_pm_restore(struct device *dev) #else /* !CONFIG_HIBERNATE_CALLBACKS */ #define pci_pm_freeze NULL -#define pci_pm_freeze_late NULL #define pci_pm_freeze_noirq NULL #define pci_pm_thaw NULL #define pci_pm_thaw_noirq NULL @@ -1260,11 +1295,11 @@ static int pci_pm_runtime_suspend(struct device *dev) * log level. 
*/ if (error == -EBUSY || error == -EAGAIN) { - dev_dbg(dev, "can't suspend now (%pf returned %d)\n", + dev_dbg(dev, "can't suspend now (%ps returned %d)\n", pm->runtime_suspend, error); return error; } else if (error) { - dev_err(dev, "can't suspend (%pf returned %d)\n", + dev_err(dev, "can't suspend (%ps returned %d)\n", pm->runtime_suspend, error); return error; } @@ -1276,7 +1311,7 @@ static int pci_pm_runtime_suspend(struct device *dev) && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, - "PCI PM: State of device not saved by %pF\n", + "PCI PM: State of device not saved by %pS\n", pm->runtime_suspend); return 0; } @@ -1346,7 +1381,6 @@ static const struct dev_pm_ops pci_dev_pm_ops = { .suspend_late = pci_pm_suspend_late, .resume = pci_pm_resume, .freeze = pci_pm_freeze, - .freeze_late = pci_pm_freeze_late, .thaw = pci_pm_thaw, .poweroff = pci_pm_poweroff, .poweroff_late = pci_pm_poweroff_late, diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c index 9795649fc6f9..ef293e735c55 100644 --- a/drivers/pci/pci-pf-stub.c +++ b/drivers/pci/pci-pf-stub.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* pci-pf-stub - simple stub driver for PCI SR-IOV PF device * - * This driver is meant to act as a "whitelist" for devices that provde + * This driver is meant to act as a "whitelist" for devices that provide * SR-IOV functionality while at the same time not actually needing a * driver of their own. */ diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 66f8a59fadbd..e408099fea52 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -66,20 +66,18 @@ static int __init pci_stub_init(void) &class, &class_mask); if (fields < 2) { - printk(KERN_WARNING - "pci-stub: invalid id string \"%s\"\n", id); + pr_warn("pci-stub: invalid ID string \"%s\"\n", id); continue; } - printk(KERN_INFO - "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", + pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", vendor, device, subvendor, subdevice, class, class_mask); rc = pci_add_dynid(&stub_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) - printk(KERN_WARNING - "pci-stub: failed to add dynamic id (%d)\n", rc); + pr_warn("pci-stub: failed to add dynamic ID (%d)\n", + rc); } return 0; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 25794c27c7a4..793412954529 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -182,6 +182,9 @@ static ssize_t current_link_speed_show(struct device *dev, return -EINVAL; switch (linkstat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_32_0GB: + speed = "32 GT/s"; + break; case PCI_EXP_LNKSTA_CLS_16_0GB: speed = "16 GT/s"; break; @@ -461,9 +464,7 @@ static ssize_t dev_rescan_store(struct device *dev, } return count; } -static struct device_attribute dev_rescan_attr = __ATTR(rescan, - (S_IWUSR|S_IWGRP), - NULL, dev_rescan_store); +static DEVICE_ATTR_WO(dev_rescan); static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -477,13 +478,12 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); return count; } -static struct device_attribute dev_remove_attr = __ATTR(remove, - (S_IWUSR|S_IWGRP), - NULL, remove_store); +static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL, + remove_store); -static ssize_t dev_bus_rescan_store(struct 
device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t bus_rescan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { unsigned long val; struct pci_bus *bus = to_pci_bus(dev); @@ -501,7 +501,7 @@ static ssize_t dev_bus_rescan_store(struct device *dev, } return count; } -static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store); +static DEVICE_ATTR_WO(bus_rescan); #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, @@ -548,154 +548,6 @@ static ssize_t devspec_show(struct device *dev, static DEVICE_ATTR_RO(devspec); #endif -#ifdef CONFIG_PCI_IOV -static ssize_t sriov_totalvfs_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev)); -} - - -static ssize_t sriov_numvfs_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->num_VFs); -} - -/* - * num_vfs > 0; number of VFs to enable - * num_vfs = 0; disable all VFs - * - * Note: SRIOV spec doesn't allow partial VF - * disable, so it's all or none. - */ -static ssize_t sriov_numvfs_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - u16 num_vfs; - - ret = kstrtou16(buf, 0, &num_vfs); - if (ret < 0) - return ret; - - if (num_vfs > pci_sriov_get_totalvfs(pdev)) - return -ERANGE; - - device_lock(&pdev->dev); - - if (num_vfs == pdev->sriov->num_VFs) - goto exit; - - /* is PF driver loaded w/callback */ - if (!pdev->driver || !pdev->driver->sriov_configure) { - pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n"); - ret = -ENOENT; - goto exit; - } - - if (num_vfs == 0) { - /* disable VFs */ - ret = pdev->driver->sriov_configure(pdev, 0); - goto exit; - } - - /* enable VFs */ - if (pdev->sriov->num_VFs) { - pci_warn(pdev, "%d VFs already enabled. 
Disable before enabling %d VFs\n", - pdev->sriov->num_VFs, num_vfs); - ret = -EBUSY; - goto exit; - } - - ret = pdev->driver->sriov_configure(pdev, num_vfs); - if (ret < 0) - goto exit; - - if (ret != num_vfs) - pci_warn(pdev, "%d VFs requested; only %d enabled\n", - num_vfs, ret); - -exit: - device_unlock(&pdev->dev); - - if (ret < 0) - return ret; - - return count; -} - -static ssize_t sriov_offset_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->offset); -} - -static ssize_t sriov_stride_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->stride); -} - -static ssize_t sriov_vf_device_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%x\n", pdev->sriov->vf_device); -} - -static ssize_t sriov_drivers_autoprobe_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); -} - -static ssize_t sriov_drivers_autoprobe_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pci_dev *pdev = to_pci_dev(dev); - bool drivers_autoprobe; - - if (kstrtobool(buf, &drivers_autoprobe) < 0) - return -EINVAL; - - pdev->sriov->drivers_autoprobe = drivers_autoprobe; - - return count; -} - -static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs); -static struct device_attribute sriov_numvfs_attr = - __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP), - sriov_numvfs_show, sriov_numvfs_store); -static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset); -static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride); -static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device); -static struct device_attribute sriov_drivers_autoprobe_attr = - __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP), - sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store); -#endif /* CONFIG_PCI_IOV */ - static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -789,7 +641,7 @@ static struct attribute *pcie_dev_attrs[] = { }; static struct attribute *pcibus_attrs[] = { - &dev_attr_rescan.attr, + &dev_attr_bus_rescan.attr, &dev_attr_cpuaffinity.attr, &dev_attr_cpulistaffinity.attr, NULL, @@ -817,7 +669,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr, !!(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)); } -static struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static DEVICE_ATTR_RO(boot_vga); static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, @@ -903,6 +755,11 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj, unsigned int size = count; loff_t init_off = off; u8 *data = (u8 *) buf; + int ret; + + ret = security_locked_down(LOCKDOWN_PCI_ACCESS); + if (ret) + return ret; if (off > dev->cfg_size) return 0; @@ -1082,7 +939,7 @@ void pci_create_legacy_files(struct pci_bus *b) sysfs_bin_attr_init(b->legacy_io); b->legacy_io->attr.name = "legacy_io"; b->legacy_io->size = 0xffff; - b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_io->attr.mode = 0600; b->legacy_io->read = 
pci_read_legacy_io; b->legacy_io->write = pci_write_legacy_io; b->legacy_io->mmap = pci_mmap_legacy_io; @@ -1096,7 +953,7 @@ void pci_create_legacy_files(struct pci_bus *b) sysfs_bin_attr_init(b->legacy_mem); b->legacy_mem->attr.name = "legacy_mem"; b->legacy_mem->size = 1024*1024; - b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); @@ -1111,8 +968,7 @@ legacy_io_err: kfree(b->legacy_io); b->legacy_io = NULL; kzalloc_err: - printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n"); - return; + dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n"); } void pci_remove_legacy_files(struct pci_bus *b) @@ -1165,6 +1021,11 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, int bar = (unsigned long)attr->private; enum pci_mmap_state mmap_type; struct resource *res = &pdev->resource[bar]; + int ret; + + ret = security_locked_down(LOCKDOWN_PCI_ACCESS); + if (ret) + return ret; if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) return -EINVAL; @@ -1241,6 +1102,12 @@ static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { + int ret; + + ret = security_locked_down(LOCKDOWN_PCI_ACCESS); + if (ret) + return ret; + return pci_resource_io(filp, kobj, attr, buf, off, count, true); } @@ -1304,7 +1171,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) } } res_attr->attr.name = res_attr_name; - res_attr->attr.mode = S_IRUSR | S_IWUSR; + res_attr->attr.mode = 0600; res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); @@ -1417,7 +1284,7 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, static const struct bin_attribute pci_config_attr = { .attr = { .name = "config", - .mode = S_IRUGO | S_IWUSR, + .mode = 0644, }, .size = PCI_CFG_SPACE_SIZE, .read = pci_read_config, @@ -1427,7 +1294,7 @@ static const struct bin_attribute pci_config_attr = { static const struct bin_attribute pcie_config_attr = { .attr = { .name = "config", - .mode = S_IRUGO | S_IWUSR, + .mode = 0644, }, .size = PCI_CFG_SPACE_EXP_SIZE, .read = pci_read_config, @@ -1456,7 +1323,7 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr, return count; } -static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); +static DEVICE_ATTR(reset, 0200, NULL, reset_store); static int pci_create_capabilities_sysfs(struct pci_dev *dev) { @@ -1466,7 +1333,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) pcie_aspm_create_sysfs_dev_files(dev); if (dev->reset_fn) { - retval = device_create_file(&dev->dev, &reset_attr); + retval = device_create_file(&dev->dev, &dev_attr_reset); if (retval) goto error; } @@ -1509,7 +1376,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) sysfs_bin_attr_init(attr); attr->size = rom_size; attr->attr.name = "rom"; - attr->attr.mode = S_IRUSR | S_IWUSR; + attr->attr.mode = 0600; attr->read = pci_read_rom; attr->write = pci_write_rom; retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); @@ -1551,7 +1418,7 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev) pcie_vpd_remove_sysfs_dev_files(dev); 
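
The sysfs attribute conversions in this file work because DEVICE_ATTR_WO(name) derives everything from the token it is given: it defines dev_attr_<name> with mode 0200 and wires .store to <name>_store, which is why dev_bus_rescan_store had to become bus_rescan_store first. A userspace mock of that naming contract (mock_attr and MOCK_ATTR_WO are invented stand-ins, not the kernel macro):

#include <stdio.h>
#include <sys/types.h>

struct mock_attr {
	const char *name;
	unsigned int mode;
	ssize_t (*store)(const char *buf, size_t count);
};

/* Token-pasting contract: MOCK_ATTR_WO(foo) needs foo_store() to exist
 * and produces a variable named dev_attr_foo, just like DEVICE_ATTR_WO. */
#define MOCK_ATTR_WO(_name)						\
	struct mock_attr dev_attr_##_name = {				\
		.name = #_name, .mode = 0200, .store = _name##_store,	\
	}

static ssize_t bus_rescan_store(const char *buf, size_t count)
{
	(void)buf;
	return (ssize_t)count;
}

static MOCK_ATTR_WO(bus_rescan);

int main(void)
{
	printf("%s: mode %o, store wired: %zd\n",
	       dev_attr_bus_rescan.name, dev_attr_bus_rescan.mode,
	       dev_attr_bus_rescan.store("1", 1));
	return 0;
}
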
pcie_aspm_remove_sysfs_dev_files(dev); if (dev->reset_fn) { - device_remove_file(&dev->dev, &reset_attr); + device_remove_file(&dev->dev, &dev_attr_reset); dev->reset_fn = 0; } } @@ -1604,7 +1471,7 @@ static int __init pci_sysfs_init(void) late_initcall(pci_sysfs_init); static struct attribute *pci_dev_dev_attrs[] = { - &vga_attr.attr, + &dev_attr_boot_vga.attr, NULL, }; @@ -1614,7 +1481,7 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); - if (a == &vga_attr.attr) + if (a == &dev_attr_boot_vga.attr) if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) return 0; @@ -1622,8 +1489,8 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, } static struct attribute *pci_dev_hp_attrs[] = { - &dev_remove_attr.attr, - &dev_rescan_attr.attr, + &dev_attr_remove.attr, + &dev_attr_dev_rescan.attr, NULL, }; @@ -1695,34 +1562,6 @@ static const struct attribute_group pci_dev_hp_attr_group = { .is_visible = pci_dev_hp_attrs_are_visible, }; -#ifdef CONFIG_PCI_IOV -static struct attribute *sriov_dev_attrs[] = { - &sriov_totalvfs_attr.attr, - &sriov_numvfs_attr.attr, - &sriov_offset_attr.attr, - &sriov_stride_attr.attr, - &sriov_vf_device_attr.attr, - &sriov_drivers_autoprobe_attr.attr, - NULL, -}; - -static umode_t sriov_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) -{ - struct device *dev = kobj_to_dev(kobj); - - if (!dev_is_pf(dev)) - return 0; - - return a->mode; -} - -static const struct attribute_group sriov_dev_attr_group = { - .attrs = sriov_dev_attrs, - .is_visible = sriov_attrs_are_visible, -}; -#endif /* CONFIG_PCI_IOV */ - static const struct attribute_group pci_dev_attr_group = { .attrs = pci_dev_dev_attrs, .is_visible = pci_dev_attrs_are_visible, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7c1b362f599a..fcfaadc774ee 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -197,8 +197,8 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); /** * pci_dev_str_match_path - test if a path string matches a device - * @dev: the PCI device to test - * @path: string to match the device against + * @dev: the PCI device to test + * @path: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) formatted as a @@ -280,8 +280,8 @@ free_and_exit: /** * pci_dev_str_match - test if a string matches a device - * @dev: the PCI device to test - * @p: string to match the device against + * @dev: the PCI device to test + * @p: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) matches a specified @@ -341,7 +341,7 @@ static int pci_dev_str_match(struct pci_dev *dev, const char *p, } else { /* * PCI Bus, Device, Function IDs are specified - * (optionally, may include a path of devfns following it) + * (optionally, may include a path of devfns following it) */ ret = pci_dev_str_match_path(dev, p, &p); if (ret < 0) @@ -425,7 +425,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, * Tell if a device supports a given PCI capability. * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not - * support it. Possible values for @cap: + * support it. 
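
The pci_find_capability() kerneldoc being tidied here describes a linked-list walk through config space; the shape of that walk is easy to show standalone. A toy version over a fake 256-byte config space (the kernel's loop-count guard against malformed chains is omitted in this sketch):

#include <stdio.h>

#define PCI_CAPABILITY_LIST 0x34

/* Follow the capability chain: the pointer at 0x34 leads to entries of
 * the form [ID byte][next-pointer byte], ending when the pointer is 0. */
static unsigned int find_cap(const unsigned char *cfg, unsigned char want)
{
	unsigned int pos = cfg[PCI_CAPABILITY_LIST];

	while (pos) {
		if (cfg[pos] == want)		/* capability ID byte */
			return pos;
		pos = cfg[pos + 1];		/* next capability pointer */
	}
	return 0;
}

int main(void)
{
	unsigned char cfg[256] = {0};

	cfg[PCI_CAPABILITY_LIST] = 0x40;
	cfg[0x40] = 0x01; cfg[0x41] = 0x50;	/* PM cap, next at 0x50 */
	cfg[0x50] = 0x05; cfg[0x51] = 0x00;	/* MSI cap, end of chain */

	printf("MSI at 0x%02x\n", find_cap(cfg, 0x05));	/* 0x50 */
	return 0;
}
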
Possible values for @cap include: * * %PCI_CAP_ID_PM Power Management * %PCI_CAP_ID_AGP Accelerated Graphics Port @@ -450,11 +450,11 @@ EXPORT_SYMBOL(pci_find_capability); /** * pci_bus_find_capability - query for devices' capabilities - * @bus: the PCI bus to query + * @bus: the PCI bus to query * @devfn: PCI device to query - * @cap: capability code + * @cap: capability code * - * Like pci_find_capability() but works for pci devices that do not have a + * Like pci_find_capability() but works for PCI devices that do not have a * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the @@ -535,7 +535,7 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); * * Returns the address of the requested extended capability structure * within the device's PCI configuration space or 0 if the device does - * not support it. Possible values for @cap: + * not support it. Possible values for @cap include: * * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting * %PCI_EXT_CAP_ID_VC Virtual Channel @@ -618,12 +618,13 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) EXPORT_SYMBOL_GPL(pci_find_ht_capability); /** - * pci_find_parent_resource - return resource region of parent bus of given region + * pci_find_parent_resource - return resource region of parent bus of given + * region * @dev: PCI device structure contains resources to be searched * @res: child resource record for which parent is sought * - * For given resource region of given device, return the resource - * region of parent bus the given region is contained in. + * For given resource region of given device, return the resource region of + * parent bus the given region is contained in. */ struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) @@ -776,6 +777,12 @@ static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN; } +static inline void platform_pci_refresh_power_state(struct pci_dev *dev) +{ + if (pci_platform_pm && pci_platform_pm->refresh_state) + pci_platform_pm->refresh_state(dev); +} + static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) { return pci_platform_pm ? @@ -800,7 +807,7 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev) /** * pci_raw_set_power_state - Use PCI PM registers to set the power state of - * given PCI device + * given PCI device * @dev: PCI device to handle. * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. * @@ -826,7 +833,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) if (state < PCI_D0 || state > PCI_D3hot) return -EINVAL; - /* Validate current state: + /* + * Validate current state: * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ @@ -837,14 +845,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) return -EINVAL; } - /* check if this device supports the desired state */ + /* Check if this device supports the desired state */ if ((state == PCI_D1 && !dev->d1_support) || (state == PCI_D2 && !dev->d2_support)) return -EIO; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - /* If we're (effectively) in D3, force entire word to 0. + /* + * If we're (effectively) in D3, force entire word to 0. * This doesn't affect PME_Status, disables PME_En, and * sets PowerState to 0. 
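
Stripped of the validation and delays discussed in the comments above, pci_raw_set_power_state() is a read-modify-write of the two PowerState bits in PMCSR. A minimal standalone illustration, using the value of PCI_PM_CTRL_STATE_MASK (0x0003):

#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK 0x0003u	/* PowerState field of PMCSR */

/* Clear the PowerState bits, then set the requested D-state (0..3). */
static uint16_t set_power_state(uint16_t pmcsr, unsigned int state)
{
	pmcsr &= (uint16_t)~PM_CTRL_STATE_MASK;
	pmcsr |= (uint16_t)(state & PM_CTRL_STATE_MASK);
	return pmcsr;
}

int main(void)
{
	/* Request D3hot from a PMCSR reading of 0. */
	printf("0x%04x\n", (unsigned)set_power_state(0x0000, 3));  /* 0x0003 */
	return 0;
}
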
*/ @@ -867,11 +876,13 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) break; } - /* enter specified state */ + /* Enter specified state */ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); - /* Mandatory power management transition delays */ - /* see PCI PM 1.1 5.6.1 table 18 */ + /* + * Mandatory power management transition delays; see PCI PM 1.1 + * 5.6.1 table 18 + */ if (state == PCI_D3hot || dev->current_state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2 || dev->current_state == PCI_D2) @@ -879,8 +890,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - if (dev->current_state != state && printk_ratelimit()) - pci_info(dev, "Refused to change power state, currently in D%d\n", + if (dev->current_state != state) + pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n", dev->current_state); /* @@ -933,16 +944,18 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) } /** - * pci_power_up - Put the given device into D0 forcibly - * @dev: PCI device to power up + * pci_refresh_power_state - Refresh the given device's power state data + * @dev: Target PCI device. + * + * Ask the platform to refresh the devices power state information and invoke + * pci_update_current_state() to update its current PCI power state. */ -void pci_power_up(struct pci_dev *dev) +void pci_refresh_power_state(struct pci_dev *dev) { if (platform_pci_power_manageable(dev)) - platform_pci_set_power_state(dev, PCI_D0); + platform_pci_refresh_power_state(dev); - pci_raw_set_power_state(dev, PCI_D0); - pci_update_current_state(dev, PCI_D0); + pci_update_current_state(dev, dev->current_state); } /** @@ -1085,16 +1098,18 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { int error; - /* bound the state we're entering */ + /* Bound the state we're entering */ if (state > PCI_D3cold) state = PCI_D3cold; else if (state < PCI_D0) state = PCI_D0; else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) + /* - * If the device or the parent bridge do not support PCI PM, - * ignore the request if we're doing anything other than putting - * it into D0 (which would only happen on boot). + * If the device or the parent bridge do not support PCI + * PM, ignore the request if we're doing anything other + * than putting it into D0 (which would only happen on + * boot). */ return 0; @@ -1104,8 +1119,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) __pci_start_power_transition(dev, state); - /* This device is quirked not to be put into D3, so - don't put it in D3 */ + /* + * This device is quirked not to be put into D3, so don't put it in + * D3 + */ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) return 0; @@ -1124,15 +1141,25 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) EXPORT_SYMBOL(pci_set_power_state); /** + * pci_power_up - Put the given device into D0 forcibly + * @dev: PCI device to power up + */ +void pci_power_up(struct pci_dev *dev) +{ + __pci_start_power_transition(dev, PCI_D0); + pci_raw_set_power_state(dev, PCI_D0); + pci_update_current_state(dev, PCI_D0); +} + +/** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended * @state: target sleep state for the whole system. This is the value - * that is passed to suspend() function. 
+ * that is passed to suspend() function. * * Returns PCI power state suitable for given device and given system * message. */ - pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { pci_power_t ret; @@ -1310,8 +1337,9 @@ static void pci_restore_ltr_state(struct pci_dev *dev) } /** - * pci_save_state - save the PCI configuration space of a device before suspending - * @dev: - PCI device that we're dealing with + * pci_save_state - save the PCI configuration space of a device before + * suspending + * @dev: PCI device that we're dealing with */ int pci_save_state(struct pci_dev *dev) { @@ -1413,7 +1441,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; res = pdev->resource + bar_idx; - size = order_base_2((resource_size(res) >> 20) | 1) - 1; + size = ilog2(resource_size(res)) - 20; ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); @@ -1422,7 +1450,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev) /** * pci_restore_state - Restore the saved state of a PCI device - * @dev: - PCI device that we're dealing with + * @dev: PCI device that we're dealing with */ void pci_restore_state(struct pci_dev *dev) { @@ -1599,8 +1627,8 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) * pci_reenable_device - Resume abandoned device * @dev: PCI device to be resumed * - * Note this function is a backend of pci_default_resume and is not supposed - * to be called by normal code, write proper resume handler and use it instead. + * NOTE: This function is a backend of pci_default_resume() and is not supposed + * to be called by normal code, write proper resume handler and use it instead. */ int pci_reenable_device(struct pci_dev *dev) { @@ -1675,9 +1703,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) * pci_enable_device_io - Initialize a device for use with IO space * @dev: PCI device to be initialized * - * Initialize device before it's used by a driver. Ask low-level code - * to enable I/O resources. Wake up the device if it was suspended. - * Beware, this function can fail. + * Initialize device before it's used by a driver. Ask low-level code + * to enable I/O resources. Wake up the device if it was suspended. + * Beware, this function can fail. */ int pci_enable_device_io(struct pci_dev *dev) { @@ -1689,9 +1717,9 @@ EXPORT_SYMBOL(pci_enable_device_io); * pci_enable_device_mem - Initialize a device for use with Memory space * @dev: PCI device to be initialized * - * Initialize device before it's used by a driver. Ask low-level code - * to enable Memory resources. Wake up the device if it was suspended. - * Beware, this function can fail. + * Initialize device before it's used by a driver. Ask low-level code + * to enable Memory resources. Wake up the device if it was suspended. + * Beware, this function can fail. */ int pci_enable_device_mem(struct pci_dev *dev) { @@ -1703,12 +1731,12 @@ EXPORT_SYMBOL(pci_enable_device_mem); * pci_enable_device - Initialize device before it's used by a driver. * @dev: PCI device to be initialized * - * Initialize device before it's used by a driver. Ask low-level code - * to enable I/O and memory. Wake up the device if it was suspended. - * Beware, this function can fail. + * Initialize device before it's used by a driver. Ask low-level code + * to enable I/O and memory. 
Wake up the device if it was suspended. + * Beware, this function can fail. * - * Note we don't actually enable the device many times if we call - * this function repeatedly (we just increment the count). + * Note we don't actually enable the device many times if we call + * this function repeatedly (we just increment the count). */ int pci_enable_device(struct pci_dev *dev) { @@ -1717,8 +1745,8 @@ int pci_enable_device(struct pci_dev *dev) EXPORT_SYMBOL(pci_enable_device); /* - * Managed PCI resources. This manages device on/off, intx/msi/msix - * on/off and BAR regions. pci_dev itself records msi/msix status, so + * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X + * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so * there's no need to track it separately. pci_devres is initialized * when a device is enabled using managed PCI device enable interface. */ @@ -1836,7 +1864,8 @@ int __weak pcibios_add_device(struct pci_dev *dev) } /** - * pcibios_release_device - provide arch specific hooks when releasing device dev + * pcibios_release_device - provide arch specific hooks when releasing + * device dev * @dev: the PCI device being released * * Permits the platform to provide architecture specific functionality when @@ -1927,8 +1956,7 @@ EXPORT_SYMBOL(pci_disable_device); * @dev: the PCIe device reset * @state: Reset state to enter into * - * - * Sets the PCIe reset state for the device. This is the default + * Set the PCIe reset state for the device. This is the default * implementation. Architecture implementations can override this. */ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, @@ -1942,7 +1970,6 @@ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, * @dev: the PCIe device reset * @state: Reset state to enter into * - * * Sets the PCI reset state for the device. */ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) @@ -2057,6 +2084,13 @@ static void pci_pme_list_scan(struct work_struct *work) */ if (bridge && bridge->current_state != PCI_D0) continue; + /* + * If the device is in D3cold it should not be + * polled either. + */ + if (pme_dev->dev->current_state == PCI_D3cold) + continue; + pci_pme_wakeup(pme_dev->dev, NULL); } else { list_del(&pme_dev->list); @@ -2339,7 +2373,8 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) } /** - * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state + * pci_prepare_to_sleep - prepare PCI device for system-wide transition + * into a sleep state * @dev: Device to handle. * * Choose the power state appropriate for the device depending on whether @@ -2367,7 +2402,8 @@ int pci_prepare_to_sleep(struct pci_dev *dev) EXPORT_SYMBOL(pci_prepare_to_sleep); /** - * pci_back_from_sleep - turn PCI device on during system-wide transition into working state + * pci_back_from_sleep - turn PCI device on during system-wide transition + * into working state * @dev: Device to handle. * * Disable device's system wake-up capability and put it into D0. @@ -2449,45 +2485,56 @@ bool pci_dev_run_wake(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_dev_run_wake); /** - * pci_dev_keep_suspended - Check if the device can stay in the suspended state. + * pci_dev_need_resume - Check if it is necessary to resume the device. * @pci_dev: Device to check. 
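
The rewritten check whose body follows reduces to a short predicate; here it is restated as standalone C, with D-states encoded as plain enumerators (an assumption of this sketch, not the kernel's pci_power_t values):

#include <stdbool.h>
#include <stdio.h>

enum { D0, D1, D2, D3HOT, D3COLD };	/* toy D-state encoding */

/* Resume when not runtime-suspended or when the platform demands it;
 * otherwise resume only if the target state differs and the change is
 * not merely "D3cold on top of D3hot", i.e. power removal over D3hot. */
static bool need_resume(bool runtime_suspended, bool platform_needs_resume,
			int target_state, int current_state)
{
	if (!runtime_suspended || platform_needs_resume)
		return true;

	return target_state != current_state &&
	       target_state != D3COLD &&
	       current_state != D3HOT;
}

int main(void)
{
	/* Runtime-suspended in D3hot with target D3cold: no resume needed. */
	printf("%d\n", need_resume(true, false, D3COLD, D3HOT));	/* 0 */
	return 0;
}
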
* - * Return 'true' if the device is runtime-suspended, it doesn't have to be + * Return 'true' if the device is not runtime-suspended or it has to be * reconfigured due to wakeup settings difference between system and runtime - * suspend and the current power state of it is suitable for the upcoming - * (system) transition. - * - * If the device is not configured for system wakeup, disable PME for it before - * returning 'true' to prevent it from waking up the system unnecessarily. + * suspend, or the current power state of it is not suitable for the upcoming + * (system-wide) transition. */ -bool pci_dev_keep_suspended(struct pci_dev *pci_dev) +bool pci_dev_need_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; - bool wakeup = device_may_wakeup(dev); + pci_power_t target_state; - if (!pm_runtime_suspended(dev) - || pci_target_state(pci_dev, wakeup) != pci_dev->current_state - || platform_pci_need_resume(pci_dev)) - return false; + if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) + return true; + + target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); /* - * At this point the device is good to go unless it's been configured - * to generate PME at the runtime suspend time, but it is not supposed - * to wake up the system. In that case, simply disable PME for it - * (it will have to be re-enabled on exit from system resume). - * - * If the device's power state is D3cold and the platform check above - * hasn't triggered, the device's configuration is suitable and we don't - * need to manipulate it at all. + * If the earlier platform check has not triggered, D3cold is just power + * removal on top of D3hot, so no need to resume the device in that + * case. */ + return target_state != pci_dev->current_state && + target_state != PCI_D3cold && + pci_dev->current_state != PCI_D3hot; +} + +/** + * pci_dev_adjust_pme - Adjust PME setting for a suspended device. + * @pci_dev: Device to check. + * + * If the device is suspended and it is not configured for system wakeup, + * disable PME for it to prevent it from waking up the system unnecessarily. + * + * Note that if the device's power state is D3cold and the platform check in + * pci_dev_need_resume() has not triggered, the device's configuration need not + * be changed. + */ +void pci_dev_adjust_pme(struct pci_dev *pci_dev) +{ + struct device *dev = &pci_dev->dev; + spin_lock_irq(&dev->power.lock); - if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && - !wakeup) + if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && + pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); - return true; } /** @@ -2777,14 +2824,14 @@ void pci_pm_init(struct pci_dev *dev) dev->d2_support = true; if (dev->d1_support || dev->d2_support) - pci_printk(KERN_DEBUG, dev, "supports%s%s\n", + pci_info(dev, "supports%s%s\n", dev->d1_support ? " D1" : "", dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { - pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n", + pci_info(dev, "PME# supported from%s%s%s%s%s\n", (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? 
" D2" : "", @@ -2952,16 +2999,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) - pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); else if (bei == PCI_EA_BEI_ROM) - pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) - pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei - PCI_EA_BEI_VF_BAR0, res, prop); else - pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); out: @@ -3005,7 +3052,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev, /** * _pci_add_cap_save_buffer - allocate buffer for saving given - * capability registers + * capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for * @extended: Standard or Extended capability ID @@ -3186,7 +3233,7 @@ static void pci_disable_acs_redir(struct pci_dev *dev) } /** - * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities * @dev: the PCI device */ static void pci_std_enable_acs(struct pci_dev *dev) @@ -3532,7 +3579,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) } /* Ensure upstream ports don't block AtomicOps on egress */ - if (!bridge->has_secondary_link) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2); if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) @@ -3609,13 +3656,14 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) EXPORT_SYMBOL_GPL(pci_common_swizzle); /** - * pci_release_region - Release a PCI bar - * @pdev: PCI device whose resources were previously reserved by pci_request_region - * @bar: BAR to release + * pci_release_region - Release a PCI bar + * @pdev: PCI device whose resources were previously reserved by + * pci_request_region() + * @bar: BAR to release * - * Releases the PCI I/O and memory resources previously reserved by a - * successful call to pci_request_region. Call this function only - * after all use of the PCI regions has ceased. + * Releases the PCI I/O and memory resources previously reserved by a + * successful call to pci_request_region(). Call this function only + * after all use of the PCI regions has ceased. */ void pci_release_region(struct pci_dev *pdev, int bar) { @@ -3637,23 +3685,23 @@ void pci_release_region(struct pci_dev *pdev, int bar) EXPORT_SYMBOL(pci_release_region); /** - * __pci_request_region - Reserved PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource. - * @exclusive: whether the region access is exclusive or not + * __pci_request_region - Reserved PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource. 
+ * @exclusive: whether the region access is exclusive or not * - * Mark the PCI region associated with PCI device @pdev BR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark the PCI region associated with PCI device @pdev BAR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. * - * If @exclusive is set, then the region is marked so that userspace - * is explicitly not allowed to map the resource via /dev/mem or - * sysfs MMIO access. + * If @exclusive is set, then the region is marked so that userspace + * is explicitly not allowed to map the resource via /dev/mem or + * sysfs MMIO access. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. */ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, int exclusive) @@ -3687,18 +3735,18 @@ err_out: } /** - * pci_request_region - Reserve PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource + * pci_request_region - Reserve PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource * - * Mark the PCI region associated with PCI device @pdev BAR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark the PCI region associated with PCI device @pdev BAR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. */ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { @@ -3707,31 +3755,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) EXPORT_SYMBOL(pci_request_region); /** - * pci_request_region_exclusive - Reserved PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource. - * - * Mark the PCI region associated with PCI device @pdev BR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. - * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. - * - * The key difference that _exclusive makes it that userspace is - * explicitly not allowed to map the resource via /dev/mem or - * sysfs. 
- */ -int pci_request_region_exclusive(struct pci_dev *pdev, int bar, - const char *res_name) -{ - return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); -} -EXPORT_SYMBOL(pci_request_region_exclusive); - -/** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved * @bars: Bitmask of BARs to be released @@ -3791,12 +3814,13 @@ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, EXPORT_SYMBOL(pci_request_selected_regions_exclusive); /** - * pci_release_regions - Release reserved PCI I/O and memory resources - * @pdev: PCI device whose resources were previously reserved by pci_request_regions + * pci_release_regions - Release reserved PCI I/O and memory resources + * @pdev: PCI device whose resources were previously reserved by + * pci_request_regions() * - * Releases all PCI I/O and memory resources previously reserved by a - * successful call to pci_request_regions. Call this function only - * after all use of the PCI regions has ceased. + * Releases all PCI I/O and memory resources previously reserved by a + * successful call to pci_request_regions(). Call this function only + * after all use of the PCI regions has ceased. */ void pci_release_regions(struct pci_dev *pdev) @@ -3806,17 +3830,17 @@ void pci_release_regions(struct pci_dev *pdev) EXPORT_SYMBOL(pci_release_regions); /** - * pci_request_regions - Reserved PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @res_name: Name to be associated with resource. + * pci_request_regions - Reserve PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @res_name: Name to be associated with resource. * - * Mark all PCI regions associated with PCI device @pdev as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark all PCI regions associated with PCI device @pdev as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. */ int pci_request_regions(struct pci_dev *pdev, const char *res_name) { @@ -3825,20 +3849,19 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) EXPORT_SYMBOL(pci_request_regions); /** - * pci_request_regions_exclusive - Reserved PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @res_name: Name to be associated with resource. + * pci_request_regions_exclusive - Reserve PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @res_name: Name to be associated with resource. * - * Mark all PCI regions associated with PCI device @pdev as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark all PCI regions associated with PCI device @pdev as being reserved + * by owner @res_name. Do not access any address inside the PCI regions + * unless this call returns successfully. * - * pci_request_regions_exclusive() will mark the region so that - * /dev/mem and the sysfs MMIO access will not be allowed. 
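
A sketch of how the request/release helpers documented here are typically paired in a driver. This is a hypothetical fragment (toy_pci_probe/toy_pci_remove and the "toy-driver" name are invented) and builds only inside a kernel tree; error handling is trimmed to the essentials:

#include <linux/pci.h>

static int toy_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Reserve every BAR under this driver's name before touching them. */
	err = pci_request_regions(pdev, "toy-driver");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	/* ... map BARs and set up the device here ... */
	return 0;
}

static void toy_pci_remove(struct pci_dev *pdev)
{
	/* Release only after all use of the regions has ceased. */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
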
+ * pci_request_regions_exclusive() will mark the region so that /dev/mem + * and the sysfs MMIO access will not be allowed. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning message is also + * printed on failure. */ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) { @@ -3849,7 +3872,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive); /* * Record the PCI IO range (expressed as CPU physical address + size). - * Return a negative value if an error has occured, zero otherwise + * Return a negative value if an error has occurred, zero otherwise */ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size) @@ -3905,14 +3928,14 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) } /** - * pci_remap_iospace - Remap the memory mapped I/O space - * @res: Resource describing the I/O space - * @phys_addr: physical address of range to be mapped + * pci_remap_iospace - Remap the memory mapped I/O space + * @res: Resource describing the I/O space + * @phys_addr: physical address of range to be mapped * - * Remap the memory mapped I/O space described by the @res - * and the CPU physical address @phys_addr into virtual address space. - * Only architectures that have memory mapped IO functions defined - * (and the PCI_IOBASE value defined) should call this function. + * Remap the memory mapped I/O space described by the @res and the CPU + * physical address @phys_addr into virtual address space. Only + * architectures that have memory mapped IO functions defined (and the + * PCI_IOBASE value defined) should call this function. */ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { @@ -3928,8 +3951,10 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, pgprot_device(PAGE_KERNEL)); #else - /* this architecture does not have memory mapped I/O space, - so this function should never be called */ + /* + * This architecture does not have memory mapped I/O space, + * so this function should never be called + */ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); return -ENODEV; #endif @@ -3937,12 +3962,12 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) EXPORT_SYMBOL(pci_remap_iospace); /** - * pci_unmap_iospace - Unmap the memory mapped I/O space - * @res: resource to be unmapped + * pci_unmap_iospace - Unmap the memory mapped I/O space + * @res: resource to be unmapped * - * Unmap the CPU virtual address @res from virtual address space. - * Only architectures that have memory mapped IO functions defined - * (and the PCI_IOBASE value defined) should call this function. + * Unmap the CPU virtual address @res from virtual address space. Only + * architectures that have memory mapped IO functions defined (and the + * PCI_IOBASE value defined) should call this function. 
*/ void pci_unmap_iospace(struct resource *res) { @@ -4185,7 +4210,7 @@ int pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n", + pci_info(dev, "cache line size of %d is not supported\n", pci_cache_line_size << 2); return -EINVAL; @@ -4288,7 +4313,7 @@ EXPORT_SYMBOL(pci_clear_mwi); * @pdev: the PCI device to operate on * @enable: boolean: whether to enable or disable PCI INTx * - * Enables/disables PCI INTx for device dev + * Enables/disables PCI INTx for device @pdev */ void pci_intx(struct pci_dev *pdev, int enable) { @@ -4364,9 +4389,8 @@ done: * pci_check_and_mask_intx - mask INTx on pending interrupt * @dev: the PCI device to operate on * - * Check if the device dev has its INTx line asserted, mask it and - * return true in that case. False is returned if no interrupt was - * pending. + * Check if the device dev has its INTx line asserted, mask it and return + * true in that case. False is returned if no interrupt was pending. */ bool pci_check_and_mask_intx(struct pci_dev *dev) { @@ -4378,9 +4402,9 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending * @dev: the PCI device to operate on * - * Check if the device dev has its INTx line asserted, unmask it if not - * and return true. False is returned and the mask remains active if - * there was still an interrupt pending. + * Check if the device dev has its INTx line asserted, unmask it if not and + * return true. False is returned and the mask remains active if there was + * still an interrupt pending. */ bool pci_check_and_unmask_intx(struct pci_dev *dev) { @@ -4389,7 +4413,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); /** - * pci_wait_for_pending_transaction - waits for pending transaction + * pci_wait_for_pending_transaction - wait for pending transaction * @dev: the PCI device to operate on * * Return 0 if transaction is pending 1 otherwise. @@ -4447,7 +4471,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) /** * pcie_has_flr - check if a device supports function level resets - * @dev: device to check + * @dev: device to check * * Returns true if the device advertises support for PCIe function level * resets. @@ -4466,7 +4490,7 @@ EXPORT_SYMBOL_GPL(pcie_has_flr); /** * pcie_flr - initiate a PCIe function level reset - * @dev: device to reset + * @dev: device to reset * * Initiate a function level reset on @dev. The caller should ensure the * device supports FLR before calling this function, e.g. by using the @@ -4514,7 +4538,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) /* * Wait for Transaction Pending bit to clear. A word-aligned test - * is used, so we use the conrol offset rather than status and shift + * is used, so we use the control offset rather than status and shift * the test bit to match. */ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, @@ -4810,6 +4834,7 @@ static void pci_dev_restore(struct pci_dev *dev) * * The device function is presumed to be unused and the caller is holding * the device mutex lock when this function is called. 
+ * * Resetting the device will make the contents of PCI configuration space * random, so any caller of this must be prepared to reinitialise the * device including MSI, bus mastering, BARs, decoding IO and memory spaces, @@ -5373,8 +5398,8 @@ EXPORT_SYMBOL_GPL(pci_reset_bus); * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * - * Returns mmrbc: maximum designed memory read count in bytes - * or appropriate error value. + * Returns mmrbc: maximum designed memory read count in bytes or + * appropriate error value. */ int pcix_get_max_mmrbc(struct pci_dev *dev) { @@ -5396,8 +5421,8 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); * pcix_get_mmrbc - get PCI-X maximum memory read byte count * @dev: PCI device to query * - * Returns mmrbc: maximum memory read count in bytes - * or appropriate error value. + * Returns mmrbc: maximum memory read count in bytes or appropriate error + * value. */ int pcix_get_mmrbc(struct pci_dev *dev) { @@ -5421,7 +5446,7 @@ EXPORT_SYMBOL(pcix_get_mmrbc); * @mmrbc: maximum memory read count in bytes * valid values are 512, 1024, 2048, 4096 * - * If possible sets maximum memory read byte count, some bridges have erratas + * If possible sets maximum memory read byte count, some bridges have errata * that prevent this. */ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) @@ -5466,8 +5491,7 @@ EXPORT_SYMBOL(pcix_set_mmrbc); * pcie_get_readrq - get PCI Express read request size * @dev: PCI device to query * - * Returns maximum memory read request in bytes - * or appropriate error value. + * Returns maximum memory read request in bytes or appropriate error value. */ int pcie_get_readrq(struct pci_dev *dev) { @@ -5495,10 +5519,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; /* - * If using the "performance" PCIe config, we clamp the - * read rq size to the max packet size to prevent the - * host bridge generating requests larger than we can - * cope with + * If using the "performance" PCIe config, we clamp the read rq + * size to the max packet size to keep the host bridge from + * generating requests larger than we can cope with. 
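
The "performance" clamp described in the pcie_set_readrq() comment above is a one-line rule: the read request size must never exceed the path MPS, so the host bridge is never asked to produce completions larger than the device can accept. As a toy standalone function:

#include <stdio.h>

/* Clamp the requested read request size (rq) to the max payload size. */
static int clamp_readrq(int rq, int mps)
{
	return rq > mps ? mps : rq;
}

int main(void)
{
	printf("%d\n", clamp_readrq(4096, 256));	/* 256 */
	return 0;
}
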
*/ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { int mps = pcie_get_mps(dev); @@ -5635,7 +5658,9 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); if (lnkcap2) { /* PCIe r3.0-compliant */ - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB) + return PCIE_SPEED_32_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) return PCIE_SPEED_16_0GT; else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) return PCIE_SPEED_8_0GT; @@ -5829,6 +5854,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, return 0; } +#ifdef CONFIG_ACPI +bool pci_pr3_present(struct pci_dev *pdev) +{ + struct acpi_device *adev; + + if (acpi_disabled) + return false; + + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) + return false; + + return adev->power.flags.power_resources && + acpi_has_method(adev->handle, "_PR3"); +} +EXPORT_SYMBOL_GPL(pci_pr3_present); +#endif + /** * pci_add_dma_alias - Add a DMA devfn alias for a device * @dev: the PCI device for which alias is added @@ -5896,8 +5939,19 @@ resource_size_t __weak pcibios_default_alignment(void) return 0; } -#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE -static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; +/* + * Arches that don't want to expose struct resource to userland as-is in + * sysfs and /proc can implement their own pci_resource_to_user(). + */ +void __weak pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} + +static char *resource_alignment_param; static DEFINE_SPINLOCK(resource_alignment_lock); /** @@ -5918,7 +5972,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, spin_lock(&resource_alignment_lock); p = resource_alignment_param; - if (!*p && !align) + if (!p || !*p) goto out; if (pci_has_flag(PCI_PROBE_ONLY)) { align = 0; @@ -6082,35 +6136,41 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) } } -static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) +static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) { - if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) - count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; - spin_lock(&resource_alignment_lock); - strncpy(resource_alignment_param, buf, count); - resource_alignment_param[count] = '\0'; - spin_unlock(&resource_alignment_lock); - return count; -} + size_t count = 0; -static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) -{ - size_t count; spin_lock(&resource_alignment_lock); - count = snprintf(buf, size, "%s", resource_alignment_param); + if (resource_alignment_param) + count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param); spin_unlock(&resource_alignment_lock); - return count; -} -static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) -{ - return pci_get_resource_alignment_param(buf, PAGE_SIZE); + /* + * When set by the command line, resource_alignment_param will not + * have a trailing line feed, which is ugly. So conditionally add + * it here. 
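
The LNKCAP2 decode extended above (adding the 32 GT/s bit) scans the Supported Link Speeds field from the highest bit down. A toy decoder using the same bit values as the PCI_EXP_LNKCAP2_SLS_* constants:

#include <stdio.h>

#define SLS_2_5GB  0x02u
#define SLS_5_0GB  0x04u
#define SLS_8_0GB  0x08u
#define SLS_16_0GB 0x10u
#define SLS_32_0GB 0x20u

/* Report the highest supported link speed advertised in LNKCAP2. */
static const char *max_speed(unsigned int lnkcap2)
{
	if (lnkcap2 & SLS_32_0GB) return "32 GT/s";
	if (lnkcap2 & SLS_16_0GB) return "16 GT/s";
	if (lnkcap2 & SLS_8_0GB)  return "8 GT/s";
	if (lnkcap2 & SLS_5_0GB)  return "5 GT/s";
	if (lnkcap2 & SLS_2_5GB)  return "2.5 GT/s";
	return "unknown";
}

int main(void)
{
	printf("%s\n", max_speed(0x3e));	/* 32 GT/s */
	return 0;
}
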
+ */ + if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) { + buf[count - 1] = '\n'; + buf[count++] = 0; + } + + return count; } static ssize_t resource_alignment_store(struct bus_type *bus, const char *buf, size_t count) { - return pci_set_resource_alignment_param(buf, count); + char *param = kstrndup(buf, count, GFP_KERNEL); + + if (!param) + return -ENOMEM; + + spin_lock(&resource_alignment_lock); + kfree(resource_alignment_param); + resource_alignment_param = param; + spin_unlock(&resource_alignment_lock); + return count; } static BUS_ATTR_RW(resource_alignment); @@ -6144,6 +6204,7 @@ static int of_pci_bus_find_domain_nr(struct device *parent) if (parent) domain = of_get_pci_domain_nr(parent->of_node); + /* * Check DT domain and use_dt_domains values. * @@ -6238,8 +6299,7 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "cbmemsize=", 10)) { pci_cardbus_mem_size = memparse(str + 10, &str); } else if (!strncmp(str, "resource_alignment=", 19)) { - pci_set_resource_alignment_param(str + 19, - strlen(str + 19)); + resource_alignment_param = str + 19; } else if (!strncmp(str, "ecrc=", 5)) { pcie_ecrc_get_policy(str + 5); } else if (!strncmp(str, "hpiosize=", 9)) { @@ -6262,11 +6322,9 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { - disable_acs_redir_param = - kstrdup(str + 18, GFP_KERNEL); + disable_acs_redir_param = str + 18; } else { - printk(KERN_ERR "PCI: Unknown option `%s'\n", - str); + pr_err("PCI: Unknown option `%s'\n", str); } } str = k; @@ -6274,3 +6332,22 @@ static int __init pci_setup(char *str) return 0; } early_param("pci", pci_setup); + +/* + * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized + * in pci_setup(), above, to point to data in the __initdata section which + * will be freed after the init sequence is complete. We can't allocate memory + * in pci_setup() because some architectures do not have any memory allocation + * service available during an early_param() call. So we allocate memory and + * copy the variable here before the init section is freed. 
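+ * Note that kstrdup() returns NULL when given a NULL pointer, so the
+ * unconditional calls below are safe even when neither option was
+ * specified on the command line.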
+ * + */ +static int __init pci_realloc_setup_params(void) +{ + resource_alignment_param = kstrdup(resource_alignment_param, + GFP_KERNEL); + disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); + + return 0; +} +pure_initcall(pci_realloc_setup_params); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d994839a3e24..3f6947ee3324 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -39,6 +39,11 @@ int pci_probe_reset_function(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_D3COLD_WAIT 100 +#define PCI_PM_BUS_WAIT 50 + /** * struct pci_platform_pm_ops - Firmware PM callbacks * @@ -51,6 +56,8 @@ int pci_bus_error_reset(struct pci_dev *dev); * * @get_state: queries the platform firmware for a device's current power state * + * @refresh_state: asks the platform to refresh the device's power state data + * * @choose_state: returns PCI power state of given device preferred by the * platform; to be used during system-wide transitions from a * sleeping state to the working state and vice versa @@ -69,6 +76,7 @@ struct pci_platform_pm_ops { bool (*is_manageable)(struct pci_dev *dev); int (*set_state)(struct pci_dev *dev, pci_power_t state); pci_power_t (*get_state)(struct pci_dev *dev); + void (*refresh_state)(struct pci_dev *dev); pci_power_t (*choose_state)(struct pci_dev *dev); int (*set_wakeup)(struct pci_dev *dev, bool enable); bool (*need_resume)(struct pci_dev *dev); @@ -76,13 +84,17 @@ struct pci_platform_pm_ops { int pci_set_platform_pm(const struct pci_platform_pm_ops *ops); void pci_update_current_state(struct pci_dev *dev, pci_power_t state); +void pci_refresh_power_state(struct pci_dev *dev); void pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); +bool pci_check_pme_status(struct pci_dev *dev); +void pci_pme_wakeup_bus(struct pci_bus *bus); int __pci_pme_wakeup(struct pci_dev *dev, void *ign); void pci_pme_restore(struct pci_dev *dev); -bool pci_dev_keep_suspended(struct pci_dev *dev); +bool pci_dev_need_resume(struct pci_dev *dev); +void pci_dev_adjust_pme(struct pci_dev *dev); void pci_dev_complete_resume(struct pci_dev *pci_dev); void pci_config_pm_runtime_get(struct pci_dev *dev); void pci_config_pm_runtime_put(struct pci_dev *dev); @@ -113,11 +125,25 @@ static inline bool pci_power_manageable(struct pci_dev *pci_dev) return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; } +static inline bool pcie_downstream_port(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_PCIE_BRIDGE; +} + int pci_vpd_init(struct pci_dev *dev); void pci_vpd_release(struct pci_dev *dev); void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev); void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev); +/* PCI Virtual Channel */ +int pci_save_vc_state(struct pci_dev *dev); +void pci_restore_vc_state(struct pci_dev *dev); +void pci_allocate_vc_save_buffers(struct pci_dev *dev); + /* PCI /proc functions */ #ifdef CONFIG_PROC_FS int pci_proc_attach_device(struct pci_dev *dev); @@ -191,6 +217,9 @@ extern const struct attribute_group *pcibus_groups[]; extern const struct device_type pci_dev_type; extern const struct attribute_group *pci_bus_groups[]; +extern unsigned long 
pci_hotplug_io_size; +extern unsigned long pci_hotplug_mem_size; +extern unsigned long pci_hotplug_bus_size; /** * pci_match_one_device - Tell if a PCI device structure has a matching @@ -231,6 +260,9 @@ enum pci_bar_type { pci_bar_mem64, /* A 64-bit memory BAR */ }; +struct device *pci_get_host_bridge_device(struct pci_dev *dev); +void pci_put_host_bridge_device(struct device *dev); + int pci_configure_extended_tags(struct pci_dev *dev, void *ign); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout); @@ -251,6 +283,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx); void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); +struct pci_bus *pci_bus_get(struct pci_bus *bus); +void pci_bus_put(struct pci_bus *bus); /* PCIe link information */ #define PCIE_SPEED2STR(speed) \ @@ -274,6 +308,7 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); void pcie_report_downtraining(struct pci_dev *dev); +void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); /* Single Root I/O Virtualization */ struct pci_sriov { @@ -293,7 +328,6 @@ struct pci_sriov { u16 driver_max_VFs; /* Max num VFs driver supports */ struct pci_dev *dev; /* Lowest numbered PF */ struct pci_dev *self; /* This PF */ - u32 cfg_size; /* VF config space size */ u32 class; /* VF device */ u8 hdr_type; /* VF header type */ u16 subsystem_vendor; /* VF subsystem vendor */ @@ -414,11 +448,12 @@ static inline void pci_restore_dpc_state(struct pci_dev *dev) {} #endif #ifdef CONFIG_PCI_ATS +/* Address Translation Service */ +void pci_ats_init(struct pci_dev *dev); void pci_restore_ats_state(struct pci_dev *dev); #else -static inline void pci_restore_ats_state(struct pci_dev *dev) -{ -} +static inline void pci_ats_init(struct pci_dev *d) { } +static inline void pci_restore_ats_state(struct pci_dev *dev) { } #endif /* CONFIG_PCI_ATS */ #ifdef CONFIG_PCI_IOV @@ -429,7 +464,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); - +extern const struct attribute_group sriov_dev_attr_group; #else static inline int pci_iov_init(struct pci_dev *dev) { @@ -514,10 +549,21 @@ static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { } static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { } #endif +#ifdef CONFIG_PCIE_ECRC +void pcie_set_ecrc_checking(struct pci_dev *dev); +void pcie_ecrc_get_policy(char *str); +#else +static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } +static inline void pcie_ecrc_get_policy(char *str) { } +#endif + #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); #else static inline void pci_ptm_init(struct pci_dev *dev) { } +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } #endif struct pci_dev_reset_methods { @@ -554,6 +600,10 @@ struct device_node; int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); int of_pci_get_max_link_speed(struct device_node *node); +void pci_set_of_node(struct pci_dev *dev); +void pci_release_of_node(struct pci_dev *dev); +void pci_set_bus_of_node(struct 
pci_bus *bus); +void pci_release_bus_of_node(struct pci_bus *bus); #else static inline int @@ -573,6 +623,11 @@ of_pci_get_max_link_speed(struct device_node *node) { return -EINVAL; } + +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } #endif /* CONFIG_OF */ #if defined(CONFIG_OF_ADDRESS) @@ -597,10 +652,19 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev); void pci_aer_clear_device_status(struct pci_dev *dev); #else static inline void pci_no_aer(void) { } -static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; } +static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } #endif +#ifdef CONFIG_ACPI +int pci_acpi_program_hp_params(struct pci_dev *dev); +#else +static inline int pci_acpi_program_hp_params(struct pci_dev *dev) +{ + return -ENODEV; +} +#endif + #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 5cbdbca904ac..362eb8cfa53b 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -142,3 +142,11 @@ config PCIE_PTM This is only useful if you have devices that support PTM, but it is safe to enable even if you don't. + +config PCIE_BW + bool "PCI Express Bandwidth Change Notification" + depends on PCIEPORTBUS + help + This enables PCI Express Bandwidth Change Notification. If + you know link width or rate changes occur only to correct + unreliable links, you may answer Y. diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index f1d7bc1e5efa..efb9d2e71e9e 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -3,7 +3,6 @@ # Makefile for PCI Express features and port driver pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o -pcieportdrv-y += bw_notification.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o @@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o obj-$(CONFIG_PCIE_PME) += pme.o obj-$(CONFIG_PCIE_DPC) += dpc.o obj-$(CONFIG_PCIE_PTM) += ptm.o +obj-$(CONFIG_PCIE_BW) += bw_notification.o diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index f8fc2114ad39..b45bc47d04fe 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -12,6 +12,9 @@ * Andrew Patterson <[email protected]> */ +#define pr_fmt(fmt) "AER: " fmt +#define dev_fmt pr_fmt + #include <linux/cper.h> #include <linux/pci.h> #include <linux/pci-acpi.h> @@ -779,10 +782,11 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) u8 bus = info->id >> 8; u8 devfn = info->id & 0xff; - pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n", - info->multi_error_valid ? "Multiple " : "", - aer_error_severity_string[info->severity], - pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n", + info->multi_error_valid ? 
"Multiple " : "", + aer_error_severity_string[info->severity], + pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); } #ifdef CONFIG_ACPI_APEI_PCIEAER @@ -964,8 +968,7 @@ static bool find_source_device(struct pci_dev *parent, pci_walk_bus(parent->subordinate, find_device_iter, e_info); if (!e_info->error_dev_num) { - pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n", - e_info->id); + pci_info(parent, "can't find device of ID%04x\n", e_info->id); return false; } return true; @@ -1377,25 +1380,24 @@ static int aer_probe(struct pcie_device *dev) int status; struct aer_rpc *rpc; struct device *device = &dev->device; + struct pci_dev *port = dev->port; rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); - if (!rpc) { - dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n"); + if (!rpc) return -ENOMEM; - } - rpc->rpd = dev->port; + + rpc->rpd = port; set_service_data(dev, rpc); status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, IRQF_SHARED, "aerdrv", dev); if (status) { - dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n", - dev->irq); + pci_err(port, "request AER IRQ %d failed\n", dev->irq); return status; } aer_enable_rootport(rpc); - dev_info(device, "AER enabled with IRQ %d\n", dev->irq); + pci_info(port, "enabled with IRQ %d\n", dev->irq); return 0; } @@ -1419,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); rc = pci_bus_error_reset(dev); - pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n"); + pci_info(dev, "Root Port link has been reset\n"); /* Clear Root Error Status */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 95d4759664b3..6988fe7389b9 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -2,7 +2,7 @@ /* * PCIe AER software error injection support. * - * Debuging PCIe AER code is quite difficult because it is hard to + * Debugging PCIe AER code is quite difficult because it is hard to * trigger various real hardware errors. 
Software based error * injection can fake almost all kinds of errors with the help of a * user space helper tool aer-inject, which can be gotten from: @@ -12,6 +12,8 @@ * Huang Ying <[email protected]> */ +#define dev_fmt(fmt) "aer_inject: " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/irq.h> @@ -332,14 +334,14 @@ static int aer_inject(struct aer_error_inj *einj) return -ENODEV; rpdev = pcie_find_root_port(dev); if (!rpdev) { - pci_err(dev, "aer_inject: Root port not found\n"); + pci_err(dev, "Root port not found\n"); ret = -ENODEV; goto out_put; } pos_cap_err = dev->aer_cap; if (!pos_cap_err) { - pci_err(dev, "aer_inject: Device doesn't support AER\n"); + pci_err(dev, "Device doesn't support AER\n"); ret = -EPROTONOSUPPORT; goto out_put; } @@ -350,7 +352,7 @@ static int aer_inject(struct aer_error_inj *einj) rp_pos_cap_err = rpdev->aer_cap; if (!rp_pos_cap_err) { - pci_err(rpdev, "aer_inject: Root port doesn't support AER\n"); + pci_err(rpdev, "Root port doesn't support AER\n"); ret = -EPROTONOSUPPORT; goto out_put; } @@ -398,14 +400,14 @@ static int aer_inject(struct aer_error_inj *einj) if (!aer_mask_override && einj->cor_status && !(einj->cor_status & ~cor_mask)) { ret = -EINVAL; - pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n"); + pci_warn(dev, "The correctable error(s) is masked by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } if (!aer_mask_override && einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) { ret = -EINVAL; - pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n"); + pci_warn(dev, "The uncorrectable error(s) is masked by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } @@ -460,19 +462,17 @@ static int aer_inject(struct aer_error_inj *einj) if (device) { edev = to_pcie_device(device); if (!get_service_data(edev)) { - dev_warn(&edev->device, - "aer_inject: AER service is not initialized\n"); + pci_warn(edev->port, "AER service is not initialized\n"); ret = -EPROTONOSUPPORT; goto out_put; } - dev_info(&edev->device, - "aer_inject: Injecting errors %08x/%08x into device %s\n", + pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", einj->cor_status, einj->uncor_status, pci_name(dev)); local_irq_disable(); generic_handle_irq(edev->irq); local_irq_enable(); } else { - pci_err(rpdev, "aer_inject: AER device not found\n"); + pci_err(rpdev, "AER device not found\n"); ret = -ENODEV; } out_put: diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 727e3c1ef9a4..652ef23bba35 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -18,7 +18,6 @@ #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/delay.h> -#include <linux/pci-aspm.h> #include "../pci.h" #ifdef MODULE_PARAM_PREFIX @@ -196,6 +195,36 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_capable = (blacklist) ? 0 : capable; } +static bool pcie_retrain_link(struct pcie_link_state *link) +{ + struct pci_dev *parent = link->pdev; + unsigned long end_jiffies; + u16 reg16; + + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); + reg16 |= PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); + if (parent->clear_retrain_link) { + /* + * Due to an erratum in some devices the Retrain Link bit + * needs to be cleared again manually to allow the link + * training to succeed. 
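+ * The clear_retrain_link flag is set by
+ * quirk_enable_clear_retrain_link() in drivers/pci/quirks.c for the
+ * affected Pericom bridges.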
+ */ + reg16 &= ~PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); + } + + /* Wait for link training end. Break out after waiting for timeout */ + end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; + do { + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + break; + msleep(1); + } while (time_before(jiffies, end_jiffies)); + return !(reg16 & PCI_EXP_LNKSTA_LT); +} + /* * pcie_aspm_configure_common_clock: check if the 2 ends of a link * could use common clock. If they are, configure them to use the @@ -205,7 +234,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) { int same_clock = 1; u16 reg16, parent_reg, child_reg[8]; - unsigned long start_jiffies; struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; /* @@ -263,21 +291,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) reg16 &= ~PCI_EXP_LNKCTL_CCC; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - /* Retrain link */ - reg16 |= PCI_EXP_LNKCTL_RL; - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - - /* Wait for link training end. Break out after waiting for timeout */ - start_jiffies = jiffies; - for (;;) { - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) - break; - msleep(1); - } - if (!(reg16 & PCI_EXP_LNKSTA_LT)) + if (pcie_retrain_link(link)) return; /* Training failed. Restore common clock configurations */ @@ -898,10 +912,10 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) /* * We allocate pcie_link_state for the component on the upstream - * end of a Link, so there's nothing to do unless this device has a - * Link on its secondary side. + * end of a Link, so there's nothing to do unless this device is + * downstream port. */ - if (!pdev->has_secondary_link) + if (!pcie_downstream_port(pdev)) return; /* VIA has a strange chipset, root port is under a bridge */ @@ -1047,18 +1061,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) up_read(&pci_bus_sem); } -static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) +static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link; if (!pci_is_pcie(pdev)) - return; + return 0; - if (pdev->has_secondary_link) + if (pcie_downstream_port(pdev)) parent = pdev; if (!parent || !parent->link_state) - return; + return -EINVAL; /* * A driver requested that ASPM be disabled on this device, but @@ -1070,7 +1084,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) */ if (aspm_disabled) { pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n"); - return; + return -EPERM; } if (sem) @@ -1090,11 +1104,13 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) mutex_unlock(&aspm_lock); if (sem) up_read(&pci_bus_sem); + + return 0; } -void pci_disable_link_state_locked(struct pci_dev *pdev, int state) +int pci_disable_link_state_locked(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, false); + return __pci_disable_link_state(pdev, state, false); } EXPORT_SYMBOL(pci_disable_link_state_locked); @@ -1102,14 +1118,14 @@ EXPORT_SYMBOL(pci_disable_link_state_locked); * pci_disable_link_state - Disable device's link state, so the link will * never enter specific states. 
Note that if the BIOS didn't grant ASPM * control to the OS, this does nothing because we can't touch the LNKCTL - * register. + * register. Returns 0 or a negative errno. * * @pdev: PCI device * @state: ASPM link state to disable */ -void pci_disable_link_state(struct pci_dev *pdev, int state) +int pci_disable_link_state(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, true); + return __pci_disable_link_state(pdev, state, true); } EXPORT_SYMBOL(pci_disable_link_state); @@ -1153,6 +1169,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, NULL, 0644); +/** + * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device. + * @pdev: Target device. + */ +bool pcie_aspm_enabled(struct pci_dev *pdev) +{ + struct pci_dev *bridge = pci_upstream_bridge(pdev); + bool ret; + + if (!bridge) + return false; + + mutex_lock(&aspm_lock); + ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false; + mutex_unlock(&aspm_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pcie_aspm_enabled); + #ifdef CONFIG_PCIEASPM_DEBUG static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c index 4fa9e3523ee1..77e685771487 100644 --- a/drivers/pci/pcie/bw_notification.c +++ b/drivers/pci/pcie/bw_notification.c @@ -107,11 +107,25 @@ static void pcie_bandwidth_notification_remove(struct pcie_device *srv) free_irq(srv->irq, srv); } +static int pcie_bandwidth_notification_suspend(struct pcie_device *srv) +{ + pcie_disable_link_bandwidth_notification(srv->port); + return 0; +} + +static int pcie_bandwidth_notification_resume(struct pcie_device *srv) +{ + pcie_enable_link_bandwidth_notification(srv->port); + return 0; +} + static struct pcie_port_service_driver pcie_bandwidth_notification_driver = { .name = "pcie_bw_notification", .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_BWNOTIF, .probe = pcie_bandwidth_notification_probe, + .suspend = pcie_bandwidth_notification_suspend, + .resume = pcie_bandwidth_notification_resume, .remove = pcie_bandwidth_notification_remove, }; diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index 7b77754a82de..a32ec3487a8d 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -6,6 +6,8 @@ * Copyright (C) 2016 Intel Corp. 
*/ +#define dev_fmt(fmt) "DPC: " fmt + #include <linux/aer.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -100,7 +102,6 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) { unsigned long timeout = jiffies + HZ; struct pci_dev *pdev = dpc->dev->port; - struct device *dev = &dpc->dev->device; u16 cap = dpc->cap_pos, status; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); @@ -110,7 +111,7 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); } if (status & PCI_EXP_DPC_RP_BUSY) { - dev_warn(dev, "DPC root port still busy\n"); + pci_warn(pdev, "root port still busy\n"); return -EBUSY; } return 0; @@ -148,7 +149,6 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) static void dpc_process_rp_pio_error(struct dpc_dev *dpc) { - struct device *dev = &dpc->dev->device; struct pci_dev *pdev = dpc->dev->port; u16 cap = dpc->cap_pos, dpc_status, first_error; u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; @@ -156,13 +156,13 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask); - dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", + pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", status, mask); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc); - dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", + pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", sev, syserr, exc); /* Get First Error Pointer */ @@ -171,7 +171,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { if ((status & ~mask) & (1 << i)) - dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i], + pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i], first_error == i ? 
" (First)" : ""); } @@ -185,18 +185,18 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) &dw2); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, &dw3); - dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n", + pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n", dw0, dw1, dw2, dw3); if (dpc->rp_log_size < 5) goto clear_status; pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log); - dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log); + pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log); for (i = 0; i < dpc->rp_log_size - 5; i++) { pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); - dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); + pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); } clear_status: pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); @@ -229,18 +229,17 @@ static irqreturn_t dpc_handler(int irq, void *context) struct aer_err_info info; struct dpc_dev *dpc = context; struct pci_dev *pdev = dpc->dev->port; - struct device *dev = &dpc->dev->device; u16 cap = dpc->cap_pos, status, source, reason, ext_reason; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); - dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n", + pci_info(pdev, "containment event, status:%#06x source:%#06x\n", status, source); reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1; ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5; - dev_warn(dev, "DPC %s detected\n", + pci_warn(pdev, "%s detected\n", (reason == 0) ? "unmasked uncorrectable error" : (reason == 1) ? "ERR_NONFATAL" : (reason == 2) ? "ERR_FATAL" : @@ -307,7 +306,7 @@ static int dpc_probe(struct pcie_device *dev) dpc_handler, IRQF_SHARED, "pcie-dpc", dpc); if (status) { - dev_warn(device, "request IRQ%d failed: %d\n", dev->irq, + pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq, status); return status; } @@ -319,7 +318,7 @@ static int dpc_probe(struct pcie_device *dev) if (dpc->rp_extensions) { dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) { - dev_err(device, "RP PIO log size %u is invalid\n", + pci_err(pdev, "RP PIO log size %u is invalid\n", dpc->rp_log_size); dpc->rp_log_size = 0; } @@ -328,11 +327,11 @@ static int dpc_probe(struct pcie_device *dev) ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); - dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", - cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), - FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), - FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, - FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); + pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", + cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), + FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), + FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, + FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16)); return status; diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 773197a12568..b0e6048a9208 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -166,7 +166,7 @@ static pci_ers_result_t reset_link(struct 
pci_dev *dev, u32 service) driver = pcie_port_find_service(dev, service); if (driver && driver->reset_link) { status = driver->reset_link(dev); - } else if (dev->has_secondary_link) { + } else if (pcie_downstream_port(dev)) { status = default_reset_link(dev); } else { pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n", diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 54d593d10396..f38e6c19dd50 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -7,6 +7,8 @@ * Copyright (C) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc. */ +#define dev_fmt(fmt) "PME: " fmt + #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -194,14 +196,14 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) * assuming that the PME was reported by a PCIe-PCI bridge that * used devfn different from zero. */ - pci_dbg(port, "PME interrupt generated for non-existent device %02x:%02x.%d\n", - busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); + pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n", + busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); found = pcie_pme_from_pci_bridge(bus, 0); } out: if (!found) - pci_dbg(port, "Spurious native PME interrupt!\n"); + pci_info(port, "Spurious native interrupt!\n"); } /** @@ -341,7 +343,7 @@ static int pcie_pme_probe(struct pcie_device *srv) return ret; } - pci_info(port, "Signaling PME with IRQ %d\n", srv->irq); + pci_info(port, "Signaling with IRQ %d\n", srv->irq); pcie_pme_mark_devices(port); pcie_pme_interrupt_enable(port, true); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 1d50dc58ac40..944827a8c7d3 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -49,7 +49,11 @@ int pcie_dpc_init(void); static inline int pcie_dpc_init(void) { return 0; } #endif +#ifdef CONFIG_PCIE_BW int pcie_bandwidth_notification_init(void); +#else +static inline int pcie_bandwidth_notification_init(void) { return 0; } +#endif /* Port Type */ #define PCIE_ANY_PORT (~0) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 7d04f9d087a6..1b330129089f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, * 7.8.2, 7.10.10, 7.31.2. */ - if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { + if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | + PCIE_PORT_SERVICE_BWNOTIF)) { pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; nvec = *pme + 1; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7e12d0163863..3d5271a7a849 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -64,11 +64,6 @@ static struct resource *get_pci_domain_busn_res(int domain_nr) return &r->res; } -static int find_anything(struct device *dev, void *data) -{ - return 1; -} - /* * Some device drivers need know if PCI is initiated. * Basically, we think PCI is not initiated when there @@ -79,7 +74,7 @@ int no_pci_devices(void) struct device *dev; int no_devices; - dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); + dev = bus_find_next_device(&pci_bus_type, NULL); no_devices = (dev == NULL); put_device(dev); return no_devices; @@ -317,7 +312,7 @@ fail: res->flags = 0; out: if (res->flags) - pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res); + pci_info(dev, "reg 0x%x: %pR\n", pos, res); return (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; } @@ -435,7 +430,7 @@ static void pci_read_bridge_io(struct pci_bus *child) region.start = base; region.end = limit + io_granularity - 1; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -457,7 +452,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -510,7 +505,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -540,8 +535,7 @@ void pci_read_bridge_bases(struct pci_bus *child) if (res && res->flags) { pci_bus_add_resource(child, res, PCI_SUBTRACTIVE_DECODE); - pci_printk(KERN_DEBUG, dev, - " bridge window %pR (subtractive decode)\n", + pci_info(dev, " bridge window %pR (subtractive decode)\n", res); } } @@ -586,16 +580,10 @@ static void pci_release_host_bridge_dev(struct device *dev) kfree(to_pci_host_bridge(dev)); } -struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +static void pci_init_host_bridge(struct pci_host_bridge *bridge) { - struct pci_host_bridge *bridge; - - bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); - if (!bridge) - return NULL; - INIT_LIST_HEAD(&bridge->windows); - bridge->dev.release = pci_release_host_bridge_dev; + INIT_LIST_HEAD(&bridge->dma_ranges); /* * We assume we can manage these PCIe features. Some systems may @@ -608,6 +596,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) bridge->native_shpc_hotplug = 1; bridge->native_pme = 1; bridge->native_ltr = 1; +} + +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +{ + struct pci_host_bridge *bridge; + + bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); + if (!bridge) + return NULL; + + pci_init_host_bridge(bridge); + bridge->dev.release = pci_release_host_bridge_dev; return bridge; } @@ -622,7 +622,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, if (!bridge) return NULL; - INIT_LIST_HEAD(&bridge->windows); + pci_init_host_bridge(bridge); bridge->dev.release = devm_pci_release_host_bridge_dev; return bridge; @@ -632,6 +632,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge); void pci_free_host_bridge(struct pci_host_bridge *bridge) { pci_free_resource_list(&bridge->windows); + pci_free_resource_list(&bridge->dma_ranges); kfree(bridge); } @@ -662,7 +663,7 @@ const unsigned char pcie_link_speed[] = { PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCIE_SPEED_16_0GT, /* 4 */ - PCI_SPEED_UNKNOWN, /* 5 */ + PCIE_SPEED_32_0GT, /* 5 */ PCI_SPEED_UNKNOWN, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ @@ -1081,6 +1082,36 @@ static void pci_enable_crs(struct pci_dev *pdev) static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, unsigned int available_buses); +/** + * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus + * numbers from EA capability. + * @dev: Bridge + * @sec: updated with secondary bus number from EA + * @sub: updated with subordinate bus number from EA + * + * If @dev is a bridge with EA capability, update @sec and @sub with + * fixed bus numbers from the capability and return true. Otherwise, + * return false. 
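+ * Example decode (register value purely illustrative): a first EA
+ * entry dword of 0x00002120 yields a fixed secondary bus of 0x20
+ * (dw & PCI_EA_SEC_BUS_MASK) and a fixed subordinate bus of 0x21
+ * ((dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT), assuming the
+ * usual bits 7:0 / bits 15:8 field layout.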
+ */ +static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) +{ + int ea, offset; + u32 dw; + + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + return false; + + /* find PCI EA capability in list */ + ea = pci_find_capability(dev, PCI_CAP_ID_EA); + if (!ea) + return false; + + offset = ea + PCI_EA_FIRST_ENT; + pci_read_config_dword(dev, offset, &dw); + *sec = dw & PCI_EA_SEC_BUS_MASK; + *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; + return true; +} /* * pci_scan_bridge_extend() - Scan buses behind a bridge @@ -1115,6 +1146,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, u16 bctl; u8 primary, secondary, subordinate; int broken = 0; + bool fixed_buses; + u8 fixed_sec, fixed_sub; + int next_busnr; /* * Make sure the bridge is powered on to be able to access config @@ -1214,17 +1248,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); + /* Read bus numbers from EA Capability (if present) */ + fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub); + if (fixed_buses) + next_busnr = fixed_sec; + else + next_busnr = max + 1; + /* * Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged, so in this * case we only re-scan this bus. */ - child = pci_find_bus(pci_domain_nr(bus), max+1); + child = pci_find_bus(pci_domain_nr(bus), next_busnr); if (!child) { - child = pci_add_new_bus(bus, dev, max+1); + child = pci_add_new_bus(bus, dev, next_busnr); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, + pci_bus_insert_busn_res(child, next_busnr, bus->busn_res.end); } max++; @@ -1285,7 +1326,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, max += i; } - /* Set subordinate bus number to its real value */ + /* + * Set subordinate bus number to its real value. + * If fixed subordinate bus number exists from EA + * capability then use it. + */ + if (fixed_buses) + max = fixed_sub; pci_bus_update_busn_res_end(child, max); pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } @@ -1379,26 +1426,38 @@ void set_pcie_port_type(struct pci_dev *pdev) pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; + parent = pci_upstream_bridge(pdev); + if (!parent) + return; + /* - * A Root Port or a PCI-to-PCIe bridge is always the upstream end - * of a Link. No PCIe component has two Links. Two Links are - * connected by a Switch that has a Port on each Link and internal - * logic to connect the two Ports. + * Some systems do not identify their upstream/downstream ports + * correctly so detect impossible configurations here and correct + * the port type accordingly. */ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_PCIE_BRIDGE) - pdev->has_secondary_link = 1; - else if (type == PCI_EXP_TYPE_UPSTREAM || - type == PCI_EXP_TYPE_DOWNSTREAM) { - parent = pci_upstream_bridge(pdev); - + if (type == PCI_EXP_TYPE_DOWNSTREAM) { + /* + * If pdev claims to be downstream port but the parent + * device is also downstream port assume pdev is actually + * upstream port. 
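+ * Concretely (hypothetical topology): if a Root Port's child also
+ * reported PCI_EXP_TYPE_DOWNSTREAM, two downstream ports would face
+ * each other across a single link, which the spec does not allow, so
+ * the child must really be a switch upstream port.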
+ */ + if (pcie_downstream_port(parent)) { + pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM; + } + } else if (type == PCI_EXP_TYPE_UPSTREAM) { /* - * Usually there's an upstream device (Root Port or Switch - * Downstream Port), but we can't assume one exists. + * If pdev claims to be upstream port but the parent + * device is also upstream port assume pdev is actually + * downstream port. */ - if (parent && !parent->has_secondary_link) - pdev->has_secondary_link = 1; + if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) { + pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM; + } } } @@ -1503,17 +1562,6 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev) return PCI_CFG_SPACE_EXP_SIZE; } -#ifdef CONFIG_PCI_IOV -static bool is_vf0(struct pci_dev *dev) -{ - if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn && - pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number) - return true; - - return false; -} -#endif - int pci_cfg_space_size(struct pci_dev *dev) { int pos; @@ -1521,9 +1569,18 @@ int pci_cfg_space_size(struct pci_dev *dev) u16 class; #ifdef CONFIG_PCI_IOV - /* Read cached value for all VFs except for VF0 */ - if (dev->is_virtfn && !is_vf0(dev)) - return dev->physfn->sriov->cfg_size; + /* + * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to + * implement a PCIe capability and therefore must implement extended + * config space. We can skip the NO_EXTCFG test below and the + * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of + * the fact that the SR-IOV capability on the PF resides in extended + * config space and must be accessible and non-aliased to have enabled + * support for this VF. This is a micro performance optimization for + * systems supporting many VFs. 
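+ * A VF can therefore report PCI_CFG_SPACE_EXP_SIZE (4096 bytes)
+ * directly, skipping the config reads that pci_cfg_space_size_ext()
+ * would otherwise issue for every VF.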
+ */ + if (dev->is_virtfn) + return PCI_CFG_SPACE_EXP_SIZE; #endif if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) @@ -1690,7 +1747,7 @@ int pci_setup_device(struct pci_dev *dev) dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ - pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n", + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", dev->vendor, dev->device, dev->hdr_type, dev->class); if (pci_early_dump) @@ -1870,162 +1927,6 @@ static void pci_configure_mps(struct pci_dev *dev) p_mps, mps, mpss); } -static struct hpp_type0 pci_default_type0 = { - .revision = 1, - .cache_line_size = 8, - .latency_timer = 0x40, - .enable_serr = 0, - .enable_perr = 0, -}; - -static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) -{ - u16 pci_cmd, pci_bctl; - - if (!hpp) - hpp = &pci_default_type0; - - if (hpp->revision > 1) { - pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", - hpp->revision); - hpp = &pci_default_type0; - } - - pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); - pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); - if (hpp->enable_serr) - pci_cmd |= PCI_COMMAND_SERR; - if (hpp->enable_perr) - pci_cmd |= PCI_COMMAND_PARITY; - pci_write_config_word(dev, PCI_COMMAND, pci_cmd); - - /* Program bridge control value */ - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, - hpp->latency_timer); - pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); - if (hpp->enable_perr) - pci_bctl |= PCI_BRIDGE_CTL_PARITY; - pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); - } -} - -static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) -{ - int pos; - - if (!hpp) - return; - - pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); - if (!pos) - return; - - pci_warn(dev, "PCI-X settings not supported\n"); -} - -static bool pcie_root_rcb_set(struct pci_dev *dev) -{ - struct pci_dev *rp = pcie_find_root_port(dev); - u16 lnkctl; - - if (!rp) - return false; - - pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); - if (lnkctl & PCI_EXP_LNKCTL_RCB) - return true; - - return false; -} - -static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) -{ - int pos; - u32 reg32; - - if (!hpp) - return; - - if (!pci_is_pcie(dev)) - return; - - if (hpp->revision > 1) { - pci_warn(dev, "PCIe settings rev %d not supported\n", - hpp->revision); - return; - } - - /* - * Don't allow _HPX to change MPS or MRRS settings. We manage - * those to make sure they're consistent with the rest of the - * platform. - */ - hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | - PCI_EXP_DEVCTL_READRQ; - hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | - PCI_EXP_DEVCTL_READRQ); - - /* Initialize Device Control Register */ - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); - - /* Initialize Link Control Register */ - if (pcie_cap_has_lnkctl(dev)) { - - /* - * If the Root Port supports Read Completion Boundary of - * 128, set RCB to 128. Otherwise, clear it. 
- */ - hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; - hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; - if (pcie_root_rcb_set(dev)) - hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; - - pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, - ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); - } - - /* Find Advanced Error Reporting Enhanced Capability */ - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return; - - /* Initialize Uncorrectable Error Mask Register */ - pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); - reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; - pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); - - /* Initialize Uncorrectable Error Severity Register */ - pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); - reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; - pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); - - /* Initialize Correctable Error Mask Register */ - pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); - reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; - pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); - - /* Initialize Advanced Error Capabilities and Control Register */ - pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); - reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; - - /* Don't enable ECRC generation or checking if unsupported */ - if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) - reg32 &= ~PCI_ERR_CAP_ECRC_GENE; - if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) - reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; - pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); - - /* - * FIXME: The following two registers are not supported yet. - * - * o Secondary Uncorrectable Error Severity Register - * o Secondary Uncorrectable Error Mask Register - */ -} - int pci_configure_extended_tags(struct pci_dev *dev, void *ign) { struct pci_host_bridge *host; @@ -2206,9 +2107,6 @@ static void pci_configure_serr(struct pci_dev *dev) static void pci_configure_device(struct pci_dev *dev) { - struct hotplug_params hpp; - int ret; - pci_configure_mps(dev); pci_configure_extended_tags(dev, NULL); pci_configure_relaxed_ordering(dev); @@ -2216,14 +2114,7 @@ static void pci_configure_device(struct pci_dev *dev) pci_configure_eetlp_prefix(dev); pci_configure_serr(dev); - memset(&hpp, 0, sizeof(hpp)); - ret = pci_get_hp_params(dev, &hpp); - if (ret) - return; - - program_hpp_type2(dev, hpp.t2); - program_hpp_type1(dev, hpp.t1); - program_hpp_type0(dev, hpp.t0); + pci_acpi_program_hp_params(dev); } static void pci_release_capabilities(struct pci_dev *dev) @@ -2604,12 +2495,8 @@ static int only_one_child(struct pci_bus *bus) * A PCIe Downstream Port normally leads to a Link with only Device * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan * only for Device 0 in that situation. - * - * Checking has_secondary_link is a hack to identify Downstream - * Ports because sometimes Switches are configured such that the - * PCIe Port Type labels are backwards. */ - if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link) + if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge)) return 1; return 0; @@ -3086,7 +2973,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) conflict = request_resource_conflict(parent_res, res); if (conflict) - dev_printk(KERN_DEBUG, &b->dev, + dev_info(&b->dev, "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", res, pci_is_root_bus(b) ? 
"domain " : "", parent_res, conflict->name, conflict); @@ -3106,8 +2993,7 @@ int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) size = bus_max - res->start + 1; ret = adjust_resource(res, res->start, size); - dev_printk(KERN_DEBUG, &b->dev, - "busn_res: %pR end %s updated to %02x\n", + dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n", &old_res, ret ? "can not be" : "is", bus_max); if (!ret && !res->parent) @@ -3125,8 +3011,7 @@ void pci_bus_release_busn_res(struct pci_bus *b) return; ret = release_resource(res); - dev_printk(KERN_DEBUG, &b->dev, - "busn_res: %pR %s released\n", + dev_info(&b->dev, "busn_res: %pR %s released\n", res, ret ? "can not be" : "is"); } diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 6fa1627ce08d..5495537c60c2 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -13,6 +13,7 @@ #include <linux/seq_file.h> #include <linux/capability.h> #include <linux/uaccess.h> +#include <linux/security.h> #include <asm/byteorder.h> #include "pci.h" @@ -115,7 +116,11 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf, struct pci_dev *dev = PDE_DATA(ino); int pos = *ppos; int size = dev->cfg_size; - int cnt; + int cnt, ret; + + ret = security_locked_down(LOCKDOWN_PCI_ACCESS); + if (ret) + return ret; if (pos >= size) return 0; @@ -196,6 +201,10 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, #endif /* HAVE_PCI_MMAP */ int ret = 0; + ret = security_locked_down(LOCKDOWN_PCI_ACCESS); + if (ret) + return ret; + switch (cmd) { case PCIIOC_CONTROLLER: ret = pci_domain_nr(dev->bus); @@ -222,6 +231,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, } /* If arch decided it can't, fall through... */ #endif /* HAVE_PCI_MMAP */ + /* fall through */ default: ret = -EINVAL; break; @@ -237,7 +247,8 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) struct pci_filp_private *fpriv = file->private_data; int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM; - if (!capable(CAP_SYS_RAWIO)) + if (!capable(CAP_SYS_RAWIO) || + security_locked_down(LOCKDOWN_PCI_ACCESS)) return -EPERM; if (fpriv->mmap_state == pci_mmap_io) { @@ -376,7 +387,7 @@ static int show_device(struct seq_file *m, void *v) } seq_putc(m, '\t'); if (drv) - seq_printf(m, "%s", drv->name); + seq_puts(m, drv->name); seq_putc(m, '\n'); return 0; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a59ad09ce911..320255e5e8f8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -20,7 +20,6 @@ #include <linux/delay.h> #include <linux/acpi.h> #include <linux/dmi.h> -#include <linux/pci-aspm.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/ktime.h> @@ -36,7 +35,7 @@ static ktime_t fixup_debug_start(struct pci_dev *dev, void (*fn)(struct pci_dev *dev)) { if (initcall_debug) - pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current)); + pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current)); return ktime_get(); } @@ -51,7 +50,7 @@ static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime, delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; if (initcall_debug || duration > 10000) - pci_info(dev, "%pF took %lld usecs\n", fn, duration); + pci_info(dev, "%pS took %lld usecs\n", fn, duration); } static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, @@ -159,8 +158,7 @@ static int __init pci_apply_final_quirks(void) u8 tmp; if (pci_cache_line_size) - printk(KERN_DEBUG "PCI: CLS %u bytes\n", 
- pci_cache_line_size << 2); + pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2); pci_apply_fixup_final_quirks = true; for_each_pci_dev(dev) { @@ -177,16 +175,16 @@ static int __init pci_apply_final_quirks(void) if (!tmp || cls == tmp) continue; - printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n", - cls << 2, tmp << 2, - pci_dfl_cache_line_size << 2); + pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n", + cls << 2, tmp << 2, + pci_dfl_cache_line_size << 2); pci_cache_line_size = pci_dfl_cache_line_size; } } if (!pci_cache_line_size) { - printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", - cls << 2, pci_dfl_cache_line_size << 2); + pr_info("PCI: CLS %u bytes, default %u\n", cls << 2, + pci_dfl_cache_line_size << 2); pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; } @@ -2245,6 +2243,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +/* + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain + * Link bit cleared after starting the link retrain process to allow this + * process to finish. + * + * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the + * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf. + */ +static void quirk_enable_clear_retrain_link(struct pci_dev *dev) +{ + dev->clear_retrain_link = 1; + pci_info(dev, "Enable PCIe Retrain Link quirk\n"); +} +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link); + static void fixup_rev1_53c810(struct pci_dev *dev) { u32 class = dev->class; @@ -2577,6 +2592,59 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, nvenet_msi_disable); /* + * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled, + * then the device can't use INTx interrupts. Tegra's PCIe root ports don't + * generate MSI interrupts for PME and AER events; instead, only INTx interrupts + * are generated. Though Tegra's PCIe root ports can generate MSI interrupts + * for other events, since the PCIe specification doesn't support using a mix of + * INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port + * service drivers registering their respective ISRs for MSIs.
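+ * Setting dev->no_msi below makes pci_msi_supported() reject the
+ * port, so pci_enable_msi()/pci_enable_msix_range() fail and the
+ * port services fall back to INTx.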
+ */ +static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev) +{ + dev->no_msi = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6, + PCI_CLASS_BRIDGE_PCI, 8, + pci_quirk_nvidia_tegra_disable_rp_msi); + +/* * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing * config register. This register controls the routing of legacy * interrupts from devices that route through the MCP55. If this register @@ -2596,7 +2664,7 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) pci_read_config_dword(dev, 0x74, &cfg); if (cfg & ((1 << 2) | (1 << 15))) { - printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n"); + pr_info("Rewriting IRQ routing register on MCP55\n"); cfg &= ~((1 << 2) | (1 << 15)); pci_write_config_dword(dev, 0x74, cfg); } @@ -2909,6 +2977,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, quirk_msi_intx_disable_qca_bug); + +/* + * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so it + * should be disabled on platforms where the device (mistakenly) advertises it. + * + * Notice that this quirk also disables MSI (which may work, but hasn't been + * tested), since currently there is no standard way to disable only MSI-X. + * + * The 0031 device id is reused for other non Root Port device types, + * therefore the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. 
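+ * The class/class_shift pair passed to DECLARE_PCI_FIXUP_CLASS_FINAL()
+ * below compares dev->class >> 8 against PCI_CLASS_BRIDGE_PCI, so
+ * endpoints that reuse the 0x0031 device ID are left untouched.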
+static void quirk_al_msi_disable(struct pci_dev *dev) +{ + dev->no_msi = 1; + pci_warn(dev, "Disabling MSI/MSI-X\n"); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, + PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable); #endif /* CONFIG_PCI_MSI */ /* @@ -3408,6 +3494,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); /* * Root ports on some Cavium CN8xxx chips do not successfully complete a bus @@ -3877,6 +3964,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); @@ -4347,6 +4436,24 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) return ret; } +static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) +{ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) + return -ENOTTY; + + /* + * Amazon's Annapurna Labs root ports don't include an ACS capability, + * but do include ACS-like functionality. The hardware doesn't support + * peer-to-peer transactions via the root port and each has a unique + * segment number. + * + * Additionally, the root ports cannot send traffic to each other. + */ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return acs_flags ? 0 : 1; +} + /* * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, @@ -4447,6 +4554,19 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) return acs_flags ? 0 : 1; } +static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * iProc PAXB Root Ports don't advertise an ACS capability, but + * they do not allow peer-to-peer transactions between Root Ports. + * Allow each Root Port to be in a separate IOMMU group by masking + * SV/RR/CR/UF bits. + */ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return acs_flags ? 
0 : 1; +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; @@ -4540,6 +4660,9 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, + /* Amazon Annapurna Labs */ + { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, { 0 } }; @@ -4903,6 +5026,7 @@ static void quirk_no_ats(struct pci_dev *pdev) /* AMD Stoney platform GPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ @@ -4914,35 +5038,49 @@ static void quirk_fsl_no_msi(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); /* - * GPUs with integrated HDA controller for streaming audio to attached displays - * need a device link from the HDA controller (consumer) to the GPU (supplier) - * so that the GPU is powered up whenever the HDA controller is accessed. - * The GPU and HDA controller are functions 0 and 1 of the same PCI device. - * The device link stays in place until shutdown (or removal of the PCI device - * if it's hotplugged). Runtime PM is allowed by default on the HDA controller - * to prevent it from permanently keeping the GPU awake. + * Although not allowed by the spec, some multi-function devices have + * dependencies of one function (consumer) on another (supplier). For the + * consumer to work in D0, the supplier must also be in D0. Create a + * device link from the consumer to the supplier to enforce this + * dependency. Runtime PM is allowed by default on the consumer to prevent + * it from permanently keeping the supplier awake. */ -static void quirk_gpu_hda(struct pci_dev *hda) +static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer, + unsigned int supplier, unsigned int class, + unsigned int class_shift) { - struct pci_dev *gpu; + struct pci_dev *supplier_pdev; - if (PCI_FUNC(hda->devfn) != 1) + if (PCI_FUNC(pdev->devfn) != consumer) return; - gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus), - hda->bus->number, - PCI_DEVFN(PCI_SLOT(hda->devfn), 0)); - if (!gpu || (gpu->class >> 16) != PCI_BASE_CLASS_DISPLAY) { - pci_dev_put(gpu); + supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier)); + if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) { + pci_dev_put(supplier_pdev); return; } - if (!device_link_add(&hda->dev, &gpu->dev, - DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) - pci_err(hda, "cannot link HDA to GPU %s\n", pci_name(gpu)); + if (device_link_add(&pdev->dev, &supplier_pdev->dev, + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) + pci_info(pdev, "D0 power state depends on %s\n", + pci_name(supplier_pdev)); + else + pci_err(pdev, "Cannot enforce power dependency on %s\n", + pci_name(supplier_pdev)); - pm_runtime_allow(&hda->dev); - pci_dev_put(gpu); + pm_runtime_allow(&pdev->dev); + pci_dev_put(supplier_pdev); +} + +/* + * Create device link for GPUs with integrated HDA controller for streaming + * audio to attached displays. 
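 + */

In practice the link means that, with DL_FLAG_PM_RUNTIME, a runtime-PM reference on the consumer also holds the supplier (the GPU) in D0. A hedged usage sketch (function and variable names are illustrative, not part of this patch):

static int sketch_use_linked_consumer(struct device *consumer)
{
	int ret;

	/* Resuming the consumer resumes the linked supplier first */
	ret = pm_runtime_get_sync(consumer);
	if (ret < 0) {
		pm_runtime_put_noidle(consumer);
		return ret;
	}

	/* ... the supplier is guaranteed to be in D0 here ... */

	pm_runtime_put(consumer);
	return 0;
}

The quirks below then only have to pick the right consumer/supplier function numbers and supplier class: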
+static void quirk_gpu_hda(struct pci_dev *hda) +{ + pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); @@ -4952,6 +5090,62 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); /* + * Create device link for NVIDIA GPU with integrated USB xHCI Host + * controller to VGA. + */ +static void quirk_gpu_usb(struct pci_dev *usb) +{ + pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); + +/* + * Create device link for NVIDIA GPU with integrated Type-C UCSI controller + * to VGA. Currently there is no class code defined for a UCSI device over + * PCI, so the UNKNOWN class is used for now; it will be updated when UCSI + * over PCI gets a class code. + */ +#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80 +static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) +{ + pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_CLASS_SERIAL_UNKNOWN, 8, + quirk_gpu_usb_typec_ucsi); + +/* + * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it + * disabled. https://devtalk.nvidia.com/default/topic/1024022 + */ +static void quirk_nvidia_hda(struct pci_dev *gpu) +{ + u8 hdr_type; + u32 val; + + /* There was no integrated HDA controller before MCP89 */ + if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M) + return; + + /* Bit 25 at offset 0x488 enables the HDA controller */ + pci_read_config_dword(gpu, 0x488, &val); + if (val & BIT(25)) + return; + + pci_info(gpu, "Enabling HDA controller\n"); + pci_write_config_dword(gpu, 0x488, val | BIT(25)); + + /* The GPU becomes a multi-function device when the HDA is enabled */ + pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type); + gpu->multifunction = !!(hdr_type & 0x80); +} +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda); +DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda); + +/* * Some IDT switches incorrectly flag an ACS Source Validation error on * completions for config read requests even though PCIe r4.0, sec * 6.12.1.1, says that completions are never affected by ACS Source @@ -5120,3 +5314,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */ SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ + +/* + * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does + * not always reset the secondary Nvidia GPU between reboots if the system + * is configured to use Hybrid Graphics mode. This results in the GPU + * being left in whatever state it was in during the *previous* boot, which + * causes spurious interrupts from the GPU, which in turn causes us to + * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly, + * this also completely breaks nouveau. + * + * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a + * clean state and fixes all these issues. + * + * When the machine is configured in Dedicated display mode, the issue + * doesn't occur. Fortunately the GPU advertises NoReset+ when in this + * mode, so we can detect that and avoid resetting it. 
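 + *
 + * (A note on the reset_fn check below: reset_fn is only set when the
 + * PCI core found a usable reset method for the device, so a GPU
 + * advertising NoReset+ fails that check and is left alone.)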
+ */ +static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) +{ + void __iomem *map; + int ret; + + if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO || + pdev->subsystem_device != 0x222e || + !pdev->reset_fn) + return; + + if (pci_enable_device_mem(pdev)) + return; + + /* + * Based on nvkm_device_ctor() in + * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c + */ + map = pci_iomap(pdev, 0, 0x23000); + if (!map) { + pci_err(pdev, "Can't map MMIO space\n"); + goto out_disable; + } + + /* + * Make sure the GPU looks like it's been POSTed before resetting + * it. + */ + if (ioread32(map + 0x2240c) & 0x2) { + pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); + ret = pci_reset_bus(pdev); + if (ret < 0) + pci_err(pdev, "Failed to reset GPU: %d\n", ret); + } + + iounmap(map); +out_disable: + pci_disable_device(pdev); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, + PCI_CLASS_DISPLAY_VGA, 8, + quirk_reset_lenovo_thinkpad_p50_nvgpu); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 2b5f720862d3..bade14002fd8 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -15,7 +15,6 @@ #include "pci.h" DECLARE_RWSEM(pci_bus_sem); -EXPORT_SYMBOL_GPL(pci_bus_sem); /* * pci_for_each_dma_alias - Iterate over DMA aliases for a device @@ -33,7 +32,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, struct pci_bus *bus; int ret; - ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data); + ret = fn(pdev, pci_dev_id(pdev), data); if (ret) return ret; @@ -88,9 +87,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, return ret; continue; case PCI_EXP_TYPE_PCIE_BRIDGE: - ret = fn(tmp, - PCI_DEVID(tmp->bus->number, - tmp->devfn), data); + ret = fn(tmp, pci_dev_id(tmp), data); if (ret) return ret; continue; @@ -101,9 +98,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, PCI_DEVID(tmp->subordinate->number, PCI_DEVFN(0, 0)), data); else - ret = fn(tmp, - PCI_DEVID(tmp->bus->number, - tmp->devfn), data); + ret = fn(tmp, pci_dev_id(tmp), data); if (ret) return ret; } @@ -240,10 +235,10 @@ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, } EXPORT_SYMBOL(pci_get_domain_bus_and_slot); -static int match_pci_dev_by_id(struct device *dev, void *data) +static int match_pci_dev_by_id(struct device *dev, const void *data) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_device_id *id = data; + const struct pci_device_id *id = data; if (pci_match_one_device(id, pdev)) return 1; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ec44a0f3a7ac..e7dbe21705ba 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -49,17 +49,15 @@ static void free_list(struct list_head *head) } /** - * add_to_list() - add a new resource tracker to the list + * add_to_list() - Add a new resource tracker to the list * @head: Head of the list - * @dev: device corresponding to which the resource - * belongs - * @res: The resource to be tracked - * @add_size: additional size to be optionally added - * to the resource + * @dev: Device to which the resource belongs + * @res: Resource to be tracked + * @add_size: Additional size to be optionally added to the resource */ -static int add_to_list(struct list_head *head, - struct pci_dev *dev, struct resource *res, - resource_size_t add_size, resource_size_t min_align) +static int add_to_list(struct list_head *head, struct pci_dev *dev, + struct resource *res, resource_size_t add_size, + resource_size_t min_align) { struct pci_dev_resource *tmp; @@ -80,8 +78,7 @@ 
static int add_to_list(struct list_head *head, return 0; } -static void remove_from_list(struct list_head *head, - struct resource *res) +static void remove_from_list(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res, *tmp; @@ -158,7 +155,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) tmp->res = r; tmp->dev = dev; - /* fallback is smallest one or list is empty*/ + /* Fallback is smallest one or list is empty */ n = head; list_for_each_entry(dev_res, head, list) { resource_size_t align; @@ -171,21 +168,20 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) break; } } - /* Insert it just before n*/ + /* Insert it just before n */ list_add_tail(&tmp->list, n); } } -static void __dev_sort_resources(struct pci_dev *dev, - struct list_head *head) +static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head) { u16 class = dev->class >> 8; - /* Don't touch classless devices or host bridges or ioapics. */ + /* Don't touch classless devices or host bridges or IOAPICs */ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) return; - /* Don't touch ioapic devices already enabled by firmware */ + /* Don't touch IOAPIC devices already enabled by firmware */ if (class == PCI_CLASS_SYSTEM_PIC) { u16 command; pci_read_config_word(dev, PCI_COMMAND, &command); @@ -204,19 +200,18 @@ static inline void reset_resource(struct resource *res) } /** - * reassign_resources_sorted() - satisfy any additional resource requests + * reassign_resources_sorted() - Satisfy any additional resource requests * - * @realloc_head : head of the list tracking requests requiring additional - * resources - * @head : head of the list tracking requests with allocated - * resources + * @realloc_head: Head of the list tracking requests requiring + * additional resources + * @head: Head of the list tracking requests with allocated + * resources * - * Walk through each element of the realloc_head and try to procure - * additional resources for the element, provided the element - * is in the head list. + * Walk through each element of the realloc_head and try to procure additional + * resources for the element, provided the element is in the head list. 
 */ static void reassign_resources_sorted(struct list_head *realloc_head, - struct list_head *head) + struct list_head *head) { struct resource *res; struct pci_dev_resource *add_res, *tmp; @@ -228,18 +223,18 @@ static void reassign_resources_sorted(struct list_head *realloc_head, bool found_match = false; res = add_res->res; - /* skip resource that has been reset */ + /* Skip resource that has been reset */ if (!res->flags) goto out; - /* skip this resource if not found in head list */ + /* Skip this resource if not found in head list */ list_for_each_entry(dev_res, head, list) { if (dev_res->res == res) { found_match = true; break; } } - if (!found_match)/* just skip */ + if (!found_match) /* Just skip */ continue; idx = res - &add_res->dev->resource[0]; @@ -255,10 +250,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head, (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) - pci_printk(KERN_DEBUG, add_res->dev, - "failed to add %llx res[%d]=%pR\n", - (unsigned long long)add_size, - idx, res); + pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", + (unsigned long long) add_size, idx, + res); } out: list_del(&add_res->list); @@ -267,14 +261,14 @@ out: } /** - * assign_requested_resources_sorted() - satisfy resource requests + * assign_requested_resources_sorted() - Satisfy resource requests * - * @head : head of the list tracking requests for resources - * @fail_head : head of the list tracking requests that could - * not be allocated + * @head: Head of the list tracking requests for resources + * @fail_head: Head of the list tracking requests that could not be + * allocated * - * Satisfy resource requests of each element in the list. Add - * requests that could not satisfied to the failed_list. + * Satisfy resource requests of each element in the list. Add requests that + * could not be satisfied to the failed_list. */ static void assign_requested_resources_sorted(struct list_head *head, struct list_head *fail_head) @@ -290,8 +284,9 @@ static void assign_requested_resources_sorted(struct list_head *head, pci_assign_resource(dev_res->dev, idx)) { if (fail_head) { /* - * if the failed res is for ROM BAR, and it will - * be enabled later, don't add it to the list + * If the failed resource is a ROM BAR and + * it won't be enabled later, don't add it + * to the list. */ if (!((idx == PCI_ROM_RESOURCE) && (!(res->flags & IORESOURCE_ROM_ENABLE)))) @@ -310,15 +305,14 @@ static unsigned long pci_fail_res_type_mask(struct list_head *fail_head) struct pci_dev_resource *fail_res; unsigned long mask = 0; - /* check failed type */ + /* Check failed type */ list_for_each_entry(fail_res, fail_head, list) mask |= fail_res->flags; /* - * one pref failed resource will set IORESOURCE_MEM, - * as we can allocate pref in non-pref range. - * Will release all assigned non-pref sibling resources - * according to that bit. + * One pref failed resource will set IORESOURCE_MEM, as we can + * allocate pref in non-pref range. Will release all assigned + * non-pref sibling resources according to that bit. 
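 + *
 + * For example, a failed prefetchable BAR carries both
 + * IORESOURCE_PREFETCH and IORESOURCE_MEM in its flags, so assigned
 + * non-pref sibling resources in the MEM window are released as well.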
 */ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); } @@ -328,11 +322,11 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res) if (res->flags & IORESOURCE_IO) return !!(mask & IORESOURCE_IO); - /* check pref at first */ + /* Check pref at first */ if (res->flags & IORESOURCE_PREFETCH) { if (mask & IORESOURCE_PREFETCH) return true; - /* count pref if its parent is non-pref */ + /* Count pref if its parent is non-pref */ else if ((mask & IORESOURCE_MEM) && !(res->parent->flags & IORESOURCE_PREFETCH)) return true; @@ -343,33 +337,33 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res) if (res->flags & IORESOURCE_MEM) return !!(mask & IORESOURCE_MEM); - return false; /* should not get here */ + return false; /* Should not get here */ } static void __assign_resources_sorted(struct list_head *head, - struct list_head *realloc_head, - struct list_head *fail_head) + struct list_head *realloc_head, + struct list_head *fail_head) { /* - * Should not assign requested resources at first. - * they could be adjacent, so later reassign can not reallocate - * them one by one in parent resource window. - * Try to assign requested + add_size at beginning - * if could do that, could get out early. - * if could not do that, we still try to assign requested at first, - * then try to reassign add_size for some resources. + * Should not assign requested resources at first. They could be + * adjacent, so later reassign can not reallocate them one by one in + * parent resource window. + * + * Try to assign requested + add_size at the beginning. If that + * succeeds, we can get out early. If it fails, we still try to assign + * the requested sizes first, then try to reassign add_size for some + * resources. * * Check the three resource types separately to decide whether we need * to release assigned resources after the requested + add_size try. - * 1. if there is io port assign fail, will release assigned - * io port. - * 2. if there is pref mmio assign fail, release assigned - * pref mmio. - * if assigned pref mmio's parent is non-pref mmio and there - * is non-pref mmio assign fail, will release that assigned - * pref mmio. - * 3. if there is non-pref mmio assign fail or pref mmio - * assigned fail, will release assigned non-pref mmio. + * + * 1. If IO port assignment fails, release the assigned IO + * port. + * 2. If pref MMIO assignment fails, release assigned pref + * MMIO. If assigned pref MMIO's parent is non-pref MMIO + * and non-pref MMIO assignment fails, release that + * assigned pref MMIO. + * 3. If non-pref MMIO assignment fails or pref MMIO + * assignment fails, release assigned non-pref MMIO. */ LIST_HEAD(save_head); LIST_HEAD(local_fail_head); @@ -398,7 +392,7 @@ static void __assign_resources_sorted(struct list_head *head, /* * There are two kinds of additional resources in the list: * 1. bridge resource -- IORESOURCE_STARTALIGN - * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN + * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN * Here just fix the additional alignment for bridge */ if (!(dev_res->res->flags & IORESOURCE_STARTALIGN)) continue; add_align = get_res_add_align(realloc_head, dev_res->res); /* - * The "head" list is sorted by the alignment to make sure - * resources with bigger alignment will be assigned first. 
- * After we change the alignment of a dev_res in "head" list, - * we need to reorder the list by alignment to make it + * The "head" list is sorted by alignment so resources with + * bigger alignment will be assigned first. After we + * change the alignment of a dev_res in "head" list, we + * need to reorder the list by alignment to make it * consistent. */ if (add_align > dev_res->res->start) { @@ -435,7 +429,7 @@ static void __assign_resources_sorted(struct list_head *head, /* Try updated head list with add_size added */ assign_requested_resources_sorted(head, &local_fail_head); - /* all assigned with add_size ? */ + /* All assigned with add_size? */ if (list_empty(&local_fail_head)) { /* Remove head list from realloc_head list */ list_for_each_entry(dev_res, head, list) @@ -445,13 +439,13 @@ static void __assign_resources_sorted(struct list_head *head, return; } - /* check failed type */ + /* Check failed type */ fail_type = pci_fail_res_type_mask(&local_fail_head); - /* remove not need to be released assigned res from head list etc */ + /* Remove assigned resources that don't need to be released from the head list, etc. */ list_for_each_entry_safe(dev_res, tmp_res, head, list) if (dev_res->res->parent && !pci_need_to_release(fail_type, dev_res->res)) { - /* remove it from realloc_head list */ + /* Remove it from realloc_head list */ remove_from_list(realloc_head, dev_res->res); remove_from_list(&save_head, dev_res->res); list_del(&dev_res->list); @@ -477,16 +471,15 @@ requested_and_reassign: /* Satisfy the must-have resource requests */ assign_requested_resources_sorted(head, fail_head); - /* Try to satisfy any additional optional resource - requests */ + /* Try to satisfy any additional optional resource requests */ if (realloc_head) reassign_resources_sorted(realloc_head, head); free_list(head); } static void pdev_assign_resources_sorted(struct pci_dev *dev, - struct list_head *add_head, - struct list_head *fail_head) + struct list_head *add_head, + struct list_head *fail_head) { LIST_HEAD(head); @@ -563,17 +556,19 @@ void pci_setup_cardbus(struct pci_bus *bus) } EXPORT_SYMBOL(pci_setup_cardbus); -/* Initialize bridges with base/limit values we have collected. - PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998) - requires that if there is no I/O ports or memory behind the - bridge, corresponding range must be turned off by writing base - value greater than limit to the bridge's base/limit registers. - - Note: care must be taken when updating I/O base/limit registers - of bridges which support 32-bit I/O. This update requires two - config space writes, so it's quite possible that an I/O window of - the bridge will have some undesirable address (e.g. 0) after the - first write. Ditto 64-bit prefetchable MMIO. */ +/* + * Initialize bridges with base/limit values we have collected. PCI-to-PCI + * Bridge Architecture Specification rev. 1.1 (1998) requires that if there + * are no I/O ports or memory behind the bridge, the corresponding range + * must be turned off by writing a base value greater than the limit to the + * bridge's base/limit registers. + * + * Note: care must be taken when updating I/O base/limit registers of + * bridges which support 32-bit I/O. This update requires two config space + * writes, so it's quite possible that an I/O window of the bridge will + * have some undesirable address (e.g. 0) after the first write. Ditto + * 64-bit prefetchable MMIO. 
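 + *
 + * Concretely, the I/O window is turned off below by writing l = 0x00f0:
 + * I/O base 0xf0, I/O limit 0x00, i.e. a base greater than the limit.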
+ */ static void pci_setup_bridge_io(struct pci_dev *bridge) { struct resource *res; @@ -587,7 +582,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) if (bridge->io_window_1k) io_mask = PCI_IO_1K_RANGE_MASK; - /* Set up the top and bottom of the PCI I/O segment for this bus. */ + /* Set up the top and bottom of the PCI I/O segment for this bus */ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { @@ -595,19 +590,19 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) io_base_lo = (region.start >> 8) & io_mask; io_limit_lo = (region.end >> 8) & io_mask; l = ((u16) io_limit_lo << 8) | io_base_lo; - /* Set up upper 16 bits of I/O base/limit. */ + /* Set up upper 16 bits of I/O base/limit */ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); pci_info(bridge, " bridge window %pR\n", res); } else { - /* Clear upper 16 bits of I/O base/limit. */ + /* Clear upper 16 bits of I/O base/limit */ io_upper16 = 0; l = 0x00f0; } - /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ + /* Temporarily disable the I/O range before updating PCI_IO_BASE */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); - /* Update lower 16 bits of I/O base/limit. */ + /* Update lower 16 bits of I/O base/limit */ pci_write_config_word(bridge, PCI_IO_BASE, l); - /* Update upper 16 bits of I/O base/limit. */ + /* Update upper 16 bits of I/O base/limit */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); } @@ -617,7 +612,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge) struct pci_bus_region region; u32 l; - /* Set up the top and bottom of the PCI Memory segment for this bus. */ + /* Set up the top and bottom of the PCI Memory segment for this bus */ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { @@ -636,12 +631,14 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) struct pci_bus_region region; u32 l, bu, lu; - /* Clear out the upper 32 bits of PREF limit. - If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily - disables PREF range, which is ok. */ + /* + * Clear out the upper 32 bits of PREF limit. If + * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables + * PREF range, which is ok. + */ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); - /* Set up PREF base/limit. */ + /* Set up PREF base/limit */ bu = lu = 0; res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; pcibios_resource_to_bus(bridge->bus, &region, res); @@ -658,7 +655,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) } pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); - /* Set the upper 32 bits of PREF base & limit. 
 */ + /* Set the upper 32 bits of PREF base & limit */ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); } @@ -702,13 +699,13 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) return 0; if (pci_claim_resource(bridge, i) == 0) - return 0; /* claimed the window */ + return 0; /* Claimed the window */ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) return 0; if (!pci_bus_clip_resource(bridge, i)) - return -EINVAL; /* clipping didn't change anything */ + return -EINVAL; /* Clipping didn't change anything */ switch (i - PCI_BRIDGE_RESOURCES) { case 0: @@ -725,14 +722,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) } if (pci_claim_resource(bridge, i) == 0) - return 0; /* claimed a smaller window */ + return 0; /* Claimed a smaller window */ return -EINVAL; } -/* Check whether the bridge supports optional I/O and - prefetchable memory ranges. If not, the respective - base/limit registers must be read-only and read as 0. */ +/* + * Check whether the bridge supports optional I/O and prefetchable memory + * ranges. If not, the respective base/limit registers must be read-only + * and read as 0. + */ static void pci_bridge_check_ranges(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; @@ -752,12 +751,14 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) } } -/* Helper function for sizing routines: find first available - bus resource of a given type. Note: we intentionally skip - the bus resources which have already been assigned (that is, - have non-NULL parent resource). */ +/* + * Helper function for sizing routines: find first available bus resource + * of a given type. Note: we intentionally skip the bus resources which + * have already been assigned (that is, have non-NULL parent resource). + */ static struct resource *find_free_bus_resource(struct pci_bus *bus, - unsigned long type_mask, unsigned long type) + unsigned long type_mask, + unsigned long type) { int i; struct resource *r; @@ -772,19 +773,21 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, } static resource_size_t calculate_iosize(resource_size_t size, - resource_size_t min_size, - resource_size_t size1, - resource_size_t add_size, - resource_size_t children_add_size, - resource_size_t old_size, - resource_size_t align) + resource_size_t min_size, + resource_size_t size1, + resource_size_t add_size, + resource_size_t children_add_size, + resource_size_t old_size, + resource_size_t align) { if (size < min_size) size = min_size; if (old_size == 1) old_size = 0; - /* To be fixed in 2.5: we should have sort of HAVE_ISA - flag in the struct pci_bus. */ + /* + * To be fixed in 2.5: we should have some sort of HAVE_ISA flag in the + * struct pci_bus. 
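 + *
 + * Worked example of the adjustment below: with ISA aliasing, only the
 + * first 256 bytes of each 1K of I/O space are alias-free, so the part
 + * of the request above 0xff is scaled by four. A request of 0x300
 + * bytes becomes (0x300 & 0xff) + ((0x300 & ~0xffUL) << 2) = 0xc00.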
+ */ #if defined(CONFIG_ISA) || defined(CONFIG_EISA) size = (size & 0xff) + ((size & ~0xffUL) << 2); #endif @@ -797,11 +800,11 @@ static resource_size_t calculate_iosize(resource_size_t size, } static resource_size_t calculate_memsize(resource_size_t size, - resource_size_t min_size, - resource_size_t add_size, - resource_size_t children_add_size, - resource_size_t old_size, - resource_size_t align) + resource_size_t min_size, + resource_size_t add_size, + resource_size_t children_add_size, + resource_size_t old_size, + resource_size_t align) { if (size < min_size) size = min_size; @@ -824,8 +827,7 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ -static resource_size_t window_alignment(struct pci_bus *bus, - unsigned long type) +static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type) { resource_size_t align = 1, arch_align; @@ -833,8 +835,8 @@ static resource_size_t window_alignment(struct pci_bus *bus, align = PCI_P2P_DEFAULT_MEM_ALIGN; else if (type & IORESOURCE_IO) { /* - * Per spec, I/O windows are 4K-aligned, but some - * bridges have an extension to support 1K alignment. + * Per spec, I/O windows are 4K-aligned, but some bridges have + * an extension to support 1K alignment. */ if (bus->self->io_window_1k) align = PCI_P2P_DEFAULT_IO_ALIGN_1K; @@ -847,20 +849,21 @@ static resource_size_t window_alignment(struct pci_bus *bus, } /** - * pbus_size_io() - size the io window of a given bus + * pbus_size_io() - Size the I/O window of a given bus * - * @bus : the bus - * @min_size : the minimum io window that must to be allocated - * @add_size : additional optional io window - * @realloc_head : track the additional io window on this list + * @bus: The bus + * @min_size: The minimum I/O window that must be allocated + * @add_size: Additional optional I/O window + * @realloc_head: Track the additional I/O window on this list * - * Sizing the IO windows of the PCI-PCI bridge is trivial, - * since these windows have 1K or 4K granularity and the IO ranges - * of non-bridge PCI devices are limited to 256 bytes. - * We must be careful with the ISA aliasing though. + * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these + * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI + * devices are limited to 256 bytes. We must be careful with the ISA + * aliasing though. 
*/ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, - resource_size_t add_size, struct list_head *realloc_head) + resource_size_t add_size, + struct list_head *realloc_head) { struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, @@ -918,9 +921,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); - pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n", - b_res, &bus->busn_res, - (unsigned long long)size1-size0); + pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n", + b_res, &bus->busn_res, + (unsigned long long) size1 - size0); } } @@ -947,33 +950,33 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns, } /** - * pbus_size_mem() - size the memory window of a given bus + * pbus_size_mem() - Size the memory window of a given bus * - * @bus : the bus - * @mask: mask the resource flag, then compare it with type - * @type: the type of free resource from bridge - * @type2: second match type - * @type3: third match type - * @min_size : the minimum memory window that must to be allocated - * @add_size : additional optional memory window - * @realloc_head : track the additional memory window on this list + * @bus: The bus + * @mask: Mask the resource flag, then compare it with type + * @type: The type of free resource from bridge + * @type2: Second match type + * @type3: Third match type + * @min_size: The minimum memory window that must be allocated + * @add_size: Additional optional memory window + * @realloc_head: Track the additional memory window on this list * - * Calculate the size of the bus and minimal alignment which - * guarantees that all child resources fit in this size. + * Calculate the size of the bus and minimal alignment which guarantees + * that all child resources fit in this size. * - * Returns -ENOSPC if there's no available bus resource of the desired type. - * Otherwise, sets the bus resource start/end to indicate the required - * size, adds things to realloc_head (if supplied), and returns 0. + * Return -ENOSPC if there's no available bus resource of the desired + * type. Otherwise, set the bus resource start/end to indicate the + * required size, add things to realloc_head (if supplied), and return 0. 
*/ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type, unsigned long type2, - unsigned long type3, - resource_size_t min_size, resource_size_t add_size, + unsigned long type3, resource_size_t min_size, + resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; resource_size_t min_align, align, size, size0, size1; - resource_size_t aligns[18]; /* Alignments from 1Mb to 128Gb */ + resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */ int order, max_order; struct resource *b_res = find_free_bus_resource(bus, mask | IORESOURCE_PREFETCH, type); @@ -1002,12 +1005,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, continue; r_size = resource_size(r); #ifdef CONFIG_PCI_IOV - /* put SRIOV requested res to the optional list */ + /* Put SRIOV requested res to the optional list */ if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { add_align = max(pci_resource_alignment(dev, r), add_align); r->end = r->start - 1; - add_to_list(realloc_head, dev, r, r_size, 0/* don't care */); + add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */); children_add_size += r_size; continue; } @@ -1029,8 +1032,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, continue; } size += max(r_size, align); - /* Exclude ranges with size > align from - calculation of the alignment. */ + /* + * Exclude ranges with size > align from calculation of + * the alignment. + */ if (r_size <= align) aligns[order] += align; if (order > max_order) @@ -1063,7 +1068,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); - pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", + pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", b_res, &bus->busn_res, (unsigned long long) (size1 - size0), (unsigned long long) add_align); @@ -1081,7 +1086,7 @@ unsigned long pci_cardbus_resource_alignment(struct resource *res) } static void pci_bus_size_cardbus(struct pci_bus *bus, - struct list_head *realloc_head) + struct list_head *realloc_head) { struct pci_dev *bridge = bus->self; struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; @@ -1091,8 +1096,8 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, if (b_res[0].parent) goto handle_b_res_1; /* - * Reserve some resources for CardBus. We reserve - * a fixed amount of bus space for CardBus bridges. + * Reserve some resources for CardBus. We reserve a fixed amount + * of bus space for CardBus bridges. */ b_res[0].start = pci_cardbus_io_size; b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; @@ -1116,7 +1121,7 @@ handle_b_res_1: } handle_b_res_2: - /* MEM1 must not be pref mmio */ + /* MEM1 must not be pref MMIO */ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) { ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; @@ -1124,10 +1129,7 @@ handle_b_res_2: pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); } - /* - * Check whether prefetchable memory is supported - * by this bridge. - */ + /* Check whether prefetchable memory is supported by this bridge. 
*/ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) { ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; @@ -1138,9 +1140,8 @@ handle_b_res_2: if (b_res[2].parent) goto handle_b_res_3; /* - * If we have prefetchable memory support, allocate - * two regions. Otherwise, allocate one region of - * twice the size. + * If we have prefetchable memory support, allocate two regions. + * Otherwise, allocate one region of twice the size. */ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { b_res[2].start = pci_cardbus_mem_size; @@ -1153,7 +1154,7 @@ handle_b_res_2: pci_cardbus_mem_size, pci_cardbus_mem_size); } - /* reduce that to half */ + /* Reduce that to half */ b_res_3_size = pci_cardbus_mem_size; } @@ -1204,7 +1205,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) switch (bus->self->hdr_type) { case PCI_HEADER_TYPE_CARDBUS: - /* don't size cardbuses yet. */ + /* Don't size CardBuses yet */ break; case PCI_HEADER_TYPE_BRIDGE: @@ -1271,18 +1272,17 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) /* * Compute the size required to put everything else in the - * non-prefetchable window. This includes: + * non-prefetchable window. This includes: * * - all non-prefetchable resources * - 32-bit prefetchable resources if there's a 64-bit * prefetchable window or no prefetchable window at all - * - 64-bit prefetchable resources if there's no - * prefetchable window at all + * - 64-bit prefetchable resources if there's no prefetchable + * window at all * - * Note that the strategy in __pci_assign_resource() must - * match that used here. Specifically, we cannot put a - * 32-bit prefetchable resource in a 64-bit prefetchable - * window. + * Note that the strategy in __pci_assign_resource() must match + * that used here. Specifically, we cannot put a 32-bit + * prefetchable resource in a 64-bit prefetchable window. */ pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3, realloc_head ? 0 : additional_mem_size, @@ -1315,8 +1315,8 @@ static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r) } /* - * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they - * are skipped by pbus_assign_resources_sorted(). + * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are + * skipped by pbus_assign_resources_sorted(). */ static void pdev_assign_fixed_resources(struct pci_dev *dev) { @@ -1427,10 +1427,9 @@ static void pci_bus_allocate_resources(struct pci_bus *b) struct pci_bus *child; /* - * Carry out a depth-first search on the PCI bus - * tree to allocate bridge apertures. Read the - * programmed bridge bases and recursively claim - * the respective bridge resources. + * Carry out a depth-first search on the PCI bus tree to allocate + * bridge apertures. Read the programmed bridge bases and + * recursively claim the respective bridge resources. */ if (b->self) { pci_read_bridge_bases(b); @@ -1484,7 +1483,7 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge, IORESOURCE_MEM_64) static void pci_bridge_release_resources(struct pci_bus *bus, - unsigned long type) + unsigned long type) { struct pci_dev *dev = bus->self; struct resource *r; @@ -1495,16 +1494,14 @@ static void pci_bridge_release_resources(struct pci_bus *bus, b_res = &dev->resource[PCI_BRIDGE_RESOURCES]; /* - * 1. if there is io port assign fail, will release bridge - * io port. - * 2. if there is non pref mmio assign fail, release bridge - * nonpref mmio. - * 3. 
if there is 64bit pref mmio assign fail, and bridge pref - * is 64bit, release bridge pref mmio. - * 4. if there is pref mmio assign fail, and bridge pref is - * 32bit mmio, release bridge pref mmio - * 5. if there is pref mmio assign fail, and bridge pref is not - * assigned, release bridge nonpref mmio. + * 1. If IO port assignment fails, release bridge IO port. + * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO. + * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit, + * release bridge pref MMIO. + * 4. If pref MMIO assignment fails, and bridge pref is 32bit, + * release bridge pref MMIO. + * 5. If pref MMIO assignment fails, and bridge pref is not + * assigned, release bridge nonpref MMIO. */ if (type & IORESOURCE_IO) idx = 0; @@ -1524,25 +1521,22 @@ static void pci_bridge_release_resources(struct pci_bus *bus, if (!r->parent) return; - /* - * if there are children under that, we should release them - * all - */ + /* If there are children, release them all */ release_child_resources(r); if (!release_resource(r)) { type = old_flags = r->flags & PCI_RES_TYPE_MASK; - pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n", - PCI_BRIDGE_RESOURCES + idx, r); - /* keep the old size */ + pci_info(dev, "resource %d %pR released\n", + PCI_BRIDGE_RESOURCES + idx, r); + /* Keep the old size */ r->end = resource_size(r) - 1; r->start = 0; r->flags = 0; - /* avoiding touch the one without PREF */ + /* Avoid touching the one without PREF */ if (type & IORESOURCE_PREFETCH) type = IORESOURCE_PREFETCH; __pci_setup_bridge(bus, type); - /* for next child res under same bridge */ + /* For next child res under same bridge */ r->flags = old_flags; } } @@ -1551,9 +1545,10 @@ enum release_type { leaf_only, whole_subtree, }; + /* - * try to release pci bridge resources that is from leaf bridge, - * so we can allocate big new one later + * Try to release PCI bridge resources from leaf bridge, so we can allocate + * a larger window later. */ static void pci_bus_release_bridge_resources(struct pci_bus *bus, unsigned long type, @@ -1596,7 +1591,7 @@ static void pci_bus_dump_res(struct pci_bus *bus) if (!res || !res->end || !res->flags) continue; - dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); + dev_info(&bus->dev, "resource %d %pR\n", i, res); } } @@ -1667,8 +1662,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) int i; bool *unassigned = data; - for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { - struct resource *r = &dev->resource[i]; + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES]; struct pci_bus_region region; /* Not assigned or rejected by kernel? 
 */ @@ -1678,7 +1673,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) pcibios_resource_to_bus(dev->bus, &region, r); if (!region.start) { *unassigned = true; - return 1; /* return early from pci_walk_bus() */ + return 1; /* Return early from pci_walk_bus() */ } } @@ -1686,13 +1681,18 @@ } static enum enable_type pci_realloc_detect(struct pci_bus *bus, - enum enable_type enable_local) + enum enable_type enable_local) { bool unassigned = false; + struct pci_host_bridge *host; if (enable_local != undefined) return enable_local; + host = pci_find_host_bridge(bus); + if (host->preserve_config) + return auto_disabled; + pci_walk_bus(bus, iov_resources_unassigned, &unassigned); if (unassigned) return auto_enabled; @@ -1701,21 +1701,21 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus, - enum enable_type enable_local) + enum enable_type enable_local) { return enable_local; } #endif /* - * first try will not touch pci bridge res - * second and later try will clear small leaf bridge res - * will stop till to the max depth if can not find good one + * The first try will not touch PCI bridge resources. + * The second and later tries will clear small leaf bridge resources. + * Stop at the max depth if a good assignment cannot be found. */ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) { - LIST_HEAD(realloc_head); /* list of resources that - want additional resources */ + LIST_HEAD(realloc_head); + /* List of resources that want additional resources */ struct list_head *add_list = NULL; int tried_times = 0; enum release_type rel_type = leaf_only; @@ -1724,26 +1724,26 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) int pci_try_num = 1; enum enable_type enable_local; - /* don't realloc if asked to do so */ + /* Don't realloc if asked to do so */ enable_local = pci_realloc_detect(bus, pci_realloc_enable); if (pci_realloc_enabled(enable_local)) { int max_depth = pci_bus_get_depth(bus); pci_try_num = max_depth + 1; - dev_printk(KERN_DEBUG, &bus->dev, - "max bus depth: %d pci_try_num: %d\n", - max_depth, pci_try_num); + dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n", + max_depth, pci_try_num); } again: /* - * last try will use add_list, otherwise will try good to have as - * must have, so can realloc parent bridge resource + * The last try will use add_list; otherwise, treat good-to-have as + * must-have, so the parent bridge resource can be reallocated */ if (tried_times + 1 == pci_try_num) add_list = &realloc_head; - /* Depth first, calculate sizes and alignments of all - subordinate buses. */ + /* + * Depth first, calculate sizes and alignments of all subordinate buses. + */ __pci_bus_size_bridges(bus, add_list); /* Depth last, allocate resources and update the hardware. */ @@ -1752,7 +1752,7 @@ again: BUG_ON(!list_empty(add_list)); tried_times++; - /* any device complain? */ + /* Did any device complain? */ if (list_empty(&fail_head)) goto dump; @@ -1766,23 +1766,23 @@ again: goto dump; } - dev_printk(KERN_DEBUG, &bus->dev, - "No. %d try to assign unassigned res\n", tried_times + 1); + dev_info(&bus->dev, "No. 
%d try to assign unassigned res\n", + tried_times + 1); - /* third times and later will not check if it is leaf */ + /* The third time and later will not check if it is a leaf */ if ((tried_times + 1) > 2) rel_type = whole_subtree; /* * Try to release leaf bridge resources that don't fit the resources of - * child devices under that bridge + * child devices under that bridge. */ list_for_each_entry(fail_res, &fail_head, list) pci_bus_release_bridge_resources(fail_res->dev->bus, fail_res->flags & PCI_RES_TYPE_MASK, rel_type); - /* restore size and flags */ + /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; @@ -1797,7 +1797,7 @@ again: goto again; dump: - /* dump the resource on buses */ + /* Dump the resource on buses */ pci_bus_dump_resources(bus); } @@ -1808,14 +1808,15 @@ void __init pci_assign_unassigned_resources(void) list_for_each_entry(root_bus, &pci_root_buses, node) { pci_assign_unassigned_root_bus_resources(root_bus); - /* Make sure the root bridge has a companion ACPI device: */ + /* Make sure the root bridge has a companion ACPI device */ if (ACPI_HANDLE(root_bus->bridge)) acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge)); } } static void extend_bridge_window(struct pci_dev *bridge, struct resource *res, - struct list_head *add_list, resource_size_t available) + struct list_head *add_list, + resource_size_t available) { struct pci_dev_resource *dev_res; @@ -1839,8 +1840,10 @@ static void extend_bridge_window(struct pci_dev *bridge, struct resource *res, } static void pci_bus_distribute_available_resources(struct pci_bus *bus, - struct list_head *add_list, resource_size_t available_io, - resource_size_t available_mmio, resource_size_t available_mmio_pref) + struct list_head *add_list, + resource_size_t available_io, + resource_size_t available_mmio, + resource_size_t available_mmio_pref) { resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref; unsigned int normal_bridges = 0, hotplug_bridges = 0; @@ -1863,18 +1866,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, available_mmio_pref); /* - * Calculate the total amount of extra resource space we can - * pass to bridges below this one. This is basically the - * extra space reduced by the minimal required space for the - * non-hotplug bridges. - */ - remaining_io = available_io; - remaining_mmio = available_mmio; - remaining_mmio_pref = available_mmio_pref; - - /* * Calculate how many hotplug bridges and normal bridges there - * are on this bus. We will distribute the additional available + * are on this bus. We will distribute the additional available * resources between hotplug bridges. */ for_each_pci_bridge(dev, bus) { @@ -1884,6 +1877,34 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, normal_bridges++; } + /* + * There is only one bridge on the bus so it gets all available + * resources which it can then distribute to the possible hotplug + * bridges below. + */ + if (hotplug_bridges + normal_bridges == 1) { + dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); + if (dev->subordinate) { + pci_bus_distribute_available_resources(dev->subordinate, + add_list, available_io, available_mmio, + available_mmio_pref); + } + return; + } + + if (hotplug_bridges == 0) + return; + + /* + * Calculate the total amount of extra resource space we can + * pass to bridges below this one. This is basically the + * extra space reduced by the minimal required space for the + * non-hotplug bridges. 
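 + *
 + * Each hotplug bridge then receives an equal share: the loop below
 + * divides the available space by hotplug_bridges and clamps the
 + * aligned result to what actually remains.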
+ */ + remaining_io = available_io; + remaining_mmio = available_mmio; + remaining_mmio_pref = available_mmio_pref; + for_each_pci_bridge(dev, bus) { const struct resource *res; @@ -1908,21 +1929,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, } /* - * There is only one bridge on the bus so it gets all available - * resources which it can then distribute to the possible - * hotplug bridges below. - */ - if (hotplug_bridges + normal_bridges == 1) { - dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); - if (dev->subordinate) { - pci_bus_distribute_available_resources(dev->subordinate, - add_list, available_io, available_mmio, - available_mmio_pref); - } - return; - } - - /* * Go over devices on this bus and distribute the remaining * resource space between hotplug bridges. */ @@ -1938,8 +1944,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, * Distribute available extra resources equally between * hotplug-capable downstream ports taking alignment into * account. - * - * Here hotplug_bridges is always != 0. */ align = pci_resource_alignment(bridge, io_res); io = div64_ul(available_io, hotplug_bridges); @@ -1961,9 +1965,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, } } -static void -pci_bridge_distribute_available_resources(struct pci_dev *bridge, - struct list_head *add_list) +static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, + struct list_head *add_list) { resource_size_t available_io, available_mmio, available_mmio_pref; const struct resource *res; @@ -1980,14 +1983,17 @@ pci_bridge_distribute_available_resources(struct pci_dev *bridge, available_mmio_pref = resource_size(res); pci_bus_distribute_available_resources(bridge->subordinate, - add_list, available_io, available_mmio, available_mmio_pref); + add_list, available_io, + available_mmio, + available_mmio_pref); } void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) { struct pci_bus *parent = bridge->subordinate; - LIST_HEAD(add_list); /* list of resources that - want additional resources */ + /* List of resources that want additional resources */ + LIST_HEAD(add_list); + int tried_times = 0; LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; @@ -1997,9 +2003,9 @@ again: __pci_bus_size_bridges(parent, &add_list); /* - * Distribute remaining resources (if any) equally between - * hotplug bridges below. This makes it possible to extend the - * hierarchy later without running out of resources. + * Distribute remaining resources (if any) equally between hotplug + * bridges below. This makes it possible to extend the hierarchy + * later without running out of resources. */ pci_bridge_distribute_available_resources(bridge, &add_list); @@ -2011,7 +2017,7 @@ again: goto enable_all; if (tried_times >= 2) { - /* still fail, don't need to try more */ + /* Still fail, don't need to try more */ free_list(&fail_head); goto enable_all; } @@ -2020,15 +2026,15 @@ again: tried_times + 1); /* - * Try to release leaf bridge's resources that doesn't fit resource of - * child device under that bridge + * Try to release leaf bridge's resources that aren't big enough + * to contain child device resources. 
*/ list_for_each_entry(fail_res, &fail_head, list) pci_bus_release_bridge_resources(fail_res->dev->bus, fail_res->flags & PCI_RES_TYPE_MASK, whole_subtree); - /* restore size and flags */ + /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; @@ -2107,7 +2113,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) } list_for_each_entry(dev_res, &saved, list) { - /* Skip the bridge we just assigned resources for. */ + /* Skip the bridge we just assigned resources for */ if (bridge == dev_res->dev) continue; @@ -2119,7 +2125,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) return 0; cleanup: - /* restore size and flags */ + /* Restore size and flags */ list_for_each_entry(dev_res, &failed, list) { struct resource *res = dev_res->res; @@ -2151,8 +2157,8 @@ cleanup: void pci_assign_unassigned_bus_resources(struct pci_bus *bus) { struct pci_dev *dev; - LIST_HEAD(add_list); /* list of resources that - want additional resources */ + /* List of resources that want additional resources */ + LIST_HEAD(add_list); down_read(&pci_bus_sem); for_each_pci_bridge(dev, bus) diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index c46d5e1ff536..ae4aa0e1f2f4 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -75,6 +75,7 @@ static const char *pci_bus_speed_strings[] = { "5.0 GT/s PCIe", /* 0x15 */ "8.0 GT/s PCIe", /* 0x16 */ "16.0 GT/s PCIe", /* 0x17 */ + "32.0 GT/s PCIe", /* 0x18 */ }; static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) @@ -403,7 +404,7 @@ static int pci_slot_init(void) pci_slots_kset = kset_create_and_add("slots", NULL, &pci_bus_kset->kobj); if (!pci_slots_kset) { - printk(KERN_ERR "PCI: Slot initialization failure\n"); + pr_err("PCI: Slot initialization failure\n"); return -ENOMEM; } return 0; diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig index aee28a5bb98f..d370f4ce0492 100644 --- a/drivers/pci/switch/Kconfig +++ b/drivers/pci/switch/Kconfig @@ -9,7 +9,7 @@ config PCI_SW_SWITCHTEC Enables support for the management interface for the MicroSemi Switchtec series of PCIe switches. Supports userspace access to submit MRPC commands to the switch via /dev/switchtecX - devices. See <file:Documentation/switchtec.txt> for more + devices. See <file:Documentation/driver-api/switchtec.rst> for more information. 
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e22766c79fe9..66610f04d76d 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -30,6 +30,10 @@ module_param(use_dma_mrpc, bool, 0644);
 MODULE_PARM_DESC(use_dma_mrpc, "Enable the use of the DMA MRPC feature");
 
+static int nirqs = 32;
+module_param(nirqs, int, 0644);
+MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
+
 static dev_t switchtec_devt;
 static DEFINE_IDA(switchtec_minor_ida);
 
@@ -390,7 +394,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp)
 		return PTR_ERR(stuser);
 
 	filp->private_data = stuser;
-	nonseekable_open(inode, filp);
+	stream_open(inode, filp);
 
 	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
 
@@ -658,19 +662,25 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 
 static int ioctl_event_summary(struct switchtec_dev *stdev,
 			       struct switchtec_user *stuser,
-			       struct switchtec_ioctl_event_summary __user *usum)
+			       struct switchtec_ioctl_event_summary __user *usum,
+			       size_t size)
 {
-	struct switchtec_ioctl_event_summary s = {0};
+	struct switchtec_ioctl_event_summary *s;
 	int i;
 	u32 reg;
+	int ret = 0;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
 
-	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
-	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
-	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
+	s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
 
 	for (i = 0; i < stdev->partition_count; i++) {
 		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
-		s.part[i] = reg;
+		s->part[i] = reg;
 	}
 
 	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
@@ -679,15 +689,19 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
 			break;
 
 		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
-		s.pff[i] = reg;
+		s->pff[i] = reg;
 	}
 
-	if (copy_to_user(usum, &s, sizeof(s)))
-		return -EFAULT;
+	if (copy_to_user(usum, s, size)) {
+		ret = -EFAULT;
+		goto error_case;
+	}
 
 	stuser->event_cnt = atomic_read(&stdev->event_cnt);
 
-	return 0;
+error_case:
+	kfree(s);
+	return ret;
 }
 
 static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
@@ -977,8 +991,9 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
 	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
 		rc = ioctl_flash_part_info(stdev, argp);
 		break;
-	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
-		rc = ioctl_event_summary(stdev, stuser, argp);
+	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
+		rc = ioctl_event_summary(stdev, stuser, argp,
+					 sizeof(struct switchtec_ioctl_event_summary_legacy));
 		break;
 	case SWITCHTEC_IOCTL_EVENT_CTL:
 		rc = ioctl_event_ctl(stdev, argp);
@@ -989,6 +1004,10 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
 	case SWITCHTEC_IOCTL_PORT_TO_PFF:
 		rc = ioctl_port_to_pff(stdev, argp);
 		break;
+	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+		rc = ioctl_event_summary(stdev, stuser, argp,
+					 sizeof(struct switchtec_ioctl_event_summary));
+		break;
 	default:
 		rc = -ENOTTY;
 		break;
@@ -1006,7 +1025,7 @@ static const struct file_operations switchtec_fops = {
 	.read = switchtec_dev_read,
 	.poll = switchtec_dev_poll,
 	.unlocked_ioctl = switchtec_dev_ioctl,
-	.compat_ioctl = switchtec_dev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
 };
 
 static void link_event_work(struct work_struct *work)
 {
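The ioctl_event_summary() rework above does two things at once: the summary structure grew too big to live on the stack, so it is now kzalloc()'d, and the handler takes an explicit size so the same code can serve both the legacy ioctl layout and the extended one (a legacy caller simply receives a prefix of the larger structure). A condensed sketch of the pattern, with an illustrative stand-in structure:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative stand-in for the large ioctl reply structure. */
struct event_summary {
	u32 global;
	u32 part[48];
	u32 pff[255];
};

/* Stand-in for the ioread32() loops that fill the real structure. */
static int fill_event_summary(struct event_summary *s)
{
	s->global = 0;
	return 0;
}

/* size is sizeof() whichever layout the caller's ioctl number selects. */
static long event_summary_ioctl(void __user *arg, size_t size)
{
	struct event_summary *s;
	long ret = 0;

	s = kzalloc(sizeof(*s), GFP_KERNEL);	/* too large for the stack */
	if (!s)
		return -ENOMEM;

	ret = fill_event_summary(s);
	if (!ret && copy_to_user(arg, s, size))
		ret = -EFAULT;

	kfree(s);
	return ret;
}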
@@ -1162,7 +1181,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
 	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
 		return 0;
 
-	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+	    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
 		return 0;
 
 	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
@@ -1247,8 +1267,12 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
 	int dma_mrpc_irq;
 	int rc;
 
-	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
-				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
+	if (nirqs < 4)
+		nirqs = 4;
+
+	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
+				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
+				      PCI_IRQ_VIRTUAL);
 	if (nvecs < 0)
 		return nvecs;
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index d96626c614f5..31e39558d49d 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -7,6 +7,7 @@
 
 #include <linux/errno.h>
 #include <linux/pci.h>
+#include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include "pci.h"
@@ -90,7 +91,8 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
 	u32 dword;
 	int err = 0;
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN) ||
+	    security_locked_down(LOCKDOWN_PCI_ACCESS))
 		return -EPERM;
 
 	dev = pci_get_domain_bus_and_slot(0, bus, dfn);
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index 5acd9c02683a..5486f8768c86 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -13,6 +13,8 @@
 #include <linux/pci_regs.h>
 #include <linux/types.h>
 
+#include "pci.h"
+
 /**
  * pci_vc_save_restore_dwords - Save or restore a series of dwords
  * @dev: device
@@ -105,7 +107,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
 	struct pci_dev *link = NULL;
 
 	/* Enable VCs from the downstream device */
-	if (!dev->has_secondary_link)
+	if (!pci_is_pcie(dev) || !pcie_downstream_port(dev))
 		return;
 
 	ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
@@ -409,7 +411,6 @@ void pci_restore_vc_state(struct pci_dev *dev)
  * For each type of VC capability, VC/VC9/MFVC, find the capability, size
  * it, and allocate a buffer for save/restore.
  */
-
 void pci_allocate_vc_save_buffers(struct pci_dev *dev)
 {
 	int i;
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 4963c2e2bd4c..7915d10f9aa1 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -571,6 +571,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
 		quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
+/*
+ * The Amazon Annapurna Labs 0x0031 device ID is reused for other non-Root Port
+ * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
+ */
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
+			      PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index eba6e33147a2..d1b16cf3403f 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -291,8 +291,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
 				vector[i] = op.msix_entries[i].vector;
 			}
 		} else {
-			printk(KERN_DEBUG "enable msix get value %x\n",
-				op.value);
+			pr_info("enable msix get value %x\n", op.value);
 			err = op.value;
 		}
 	} else {
@@ -364,12 +363,12 @@ static void pci_frontend_disable_msi(struct pci_dev *dev)
 	err = do_pci_op(pdev, &op);
 	if (err == XEN_PCI_ERR_dev_not_found) {
 		/* XXX No response from backend, what shall we do? */
-		printk(KERN_DEBUG "get no response from backend for disable MSI\n");
+		pr_info("get no response from backend for disable MSI\n");
 		return;
 	}
 	if (err)
 		/* how can pciback notify us fail? */
-		printk(KERN_DEBUG "get fake response frombackend\n");
+		pr_info("get fake response from backend\n");
 }
 
 static struct xen_pci_frontend_ops pci_frontend_ops = {
@@ -1104,7 +1103,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
 	case XenbusStateClosed:
 		if (xdev->state == XenbusStateClosed)
 			break;
-		/* Missed the backend's CLOSING state -- fallthrough */
+		/* fall through - Missed the backend's CLOSING state. */
 	case XenbusStateClosing:
 		dev_warn(&xdev->dev, "backend going away!\n");
 		pcifront_try_disconnect(pdev);
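Earlier in this diff, switchtec_init_isr() starts allocating up to nirqs vectors instead of a fixed four, and passes PCI_IRQ_VIRTUAL so that vectors beyond what the function's MSI-X table exposes can still be handed out (such virtual vectors carry no PCI message and are meant for in-kernel uses like NTB doorbells). A sketch of that allocation, assuming an illustrative driver:

#include <linux/pci.h>

static int nirqs = 32;	/* module parameter in the driver above */

static int alloc_device_vectors(struct pci_dev *pdev)
{
	int nvecs;

	if (nirqs < 4)		/* the device needs at least 4 vectors */
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	return nvecs < 0 ? nvecs : 0;
}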
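The syscall.c hunk is the standard kernel-lockdown pattern: a privileged operation must now pass both the capability check and the lockdown policy, since CAP_SYS_ADMIN alone no longer implies the right to write PCI config space on a locked-down kernel. A minimal sketch of the same gate (the helper name here is illustrative):

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/security.h>

static int may_write_pci_config(void)
{
	/* Both checks must pass; lockdown vetoes even CAP_SYS_ADMIN. */
	if (!capable(CAP_SYS_ADMIN) ||
	    security_locked_down(LOCKDOWN_PCI_ACCESS))
		return -EPERM;
	return 0;
}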