Diffstat (limited to 'drivers/pci')
85 files changed, 3937 insertions, 1993 deletions
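Editor's note: among the hunks below, access.c gains pci_clear_and_set_config_dword(), an exported helper wrapping the common read-modify-write of a config-space dword. A minimal usage sketch under stated assumptions: the function and its parameters are hypothetical, while the capability lookup, register offset, and mask macros are the standard ones from linux/pci_regs.h; this mirrors how the ASPM code uses the helper but is not taken from this diff.

#include <linux/pci.h>
#include <linux/bitfield.h>

/*
 * Hypothetical caller: rewrite only the LTR L1.2 threshold fields of the
 * L1 PM Substates Control 1 register, leaving the enable bits untouched.
 * pci_clear_and_set_config_dword() performs the read/mask/set/write
 * sequence shown expanded in the access.c hunk below.
 */
static void example_set_l12_threshold(struct pci_dev *pdev, u32 value, u32 scale)
{
	u16 l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);

	if (!l1ss)
		return;

	pci_clear_and_set_config_dword(pdev, l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
				       FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale));
}

Callers that previously open-coded pci_read_config_dword()/pci_write_config_dword() pairs can switch to this helper without behavior change; note it is not atomic against concurrent config writers.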
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 74147262625b..d35001589d88 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -13,6 +13,11 @@ config FORCE_PCI select HAVE_PCI select PCI +# select this to provide a generic PCI iomap, +# without PCI itself having to be defined +config GENERIC_PCI_IOMAP + bool + menuconfig PCI bool "PCI support" depends on HAVE_PCI diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index cc8b4e01e29d..175302036890 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -4,16 +4,17 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ remove.o pci.o pci-driver.o search.o \ - pci-sysfs.o rom.o setup-res.o irq.o vpd.o \ - setup-bus.o vc.o mmap.o setup-irq.o + rom.o setup-res.o irq.o vpd.o \ + setup-bus.o vc.o mmap.o devres.o obj-$(CONFIG_PCI) += msi/ obj-$(CONFIG_PCI) += pcie/ ifdef CONFIG_PCI obj-$(CONFIG_PROC_FS) += proc.o -obj-$(CONFIG_SYSFS) += slot.o +obj-$(CONFIG_SYSFS) += pci-sysfs.o slot.o obj-$(CONFIG_ACPI) += pci-acpi.o +obj-$(CONFIG_GENERIC_PCI_IOMAP) += iomap.o endif obj-$(CONFIG_OF) += of.o diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d36..6449056b57dd 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 9c2137dae429..826b5016a101 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_add_devices); -/** pci_walk_bus - walk devices on/under bus, calling callback. - * @top bus whose devices should be walked - * @cb callback to be called for each device found - * @userdata arbitrary pointer to be passed to callback. - * - * Walk the given bus, including any bridged devices - * on buses under this bus. Call the provided callback - * on each device found. - * - * We check the return of @cb each time. If it returns anything - * other than 0, we break out. - * - */ -void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata) +static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata, bool locked) { struct pci_dev *dev; struct pci_bus *bus; @@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), int retval; bus = top; - down_read(&pci_bus_sem); + if (!locked) + down_read(&pci_bus_sem); next = top->devices.next; for (;;) { if (next == &bus->devices) { @@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), if (retval) break; } - up_read(&pci_bus_sem); + if (!locked) + up_read(&pci_bus_sem); +} + +/** + * pci_walk_bus - walk devices on/under bus, calling callback. + * @top: bus whose devices should be walked + * @cb: callback to be called for each device found + * @userdata: arbitrary pointer to be passed to callback + * + * Walk the given bus, including any bridged devices + * on buses under this bus. Call the provided callback + * on each device found. + * + * We check the return of @cb each time. 
If it returns anything + * other than 0, we break out. + */ +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) +{ + __pci_walk_bus(top, cb, userdata, false); } EXPORT_SYMBOL_GPL(pci_walk_bus); +void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) +{ + lockdep_assert_held(&pci_bus_sem); + + __pci_walk_bus(top, cb, userdata, true); +} +EXPORT_SYMBOL_GPL(pci_walk_bus_locked); + struct pci_bus *pci_bus_get(struct pci_bus *bus) { if (bus) diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 291d12711363..1d5a70c9055e 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -47,6 +47,7 @@ config PCI_J721E config PCI_J721E_HOST bool "TI J721E PCIe controller (host mode)" + depends on ARCH_K3 || COMPILE_TEST depends on OF select PCIE_CADENCE_HOST select PCI_J721E @@ -57,6 +58,7 @@ config PCI_J721E_HOST config PCI_J721E_EP bool "TI J721E PCIe controller (endpoint mode)" + depends on ARCH_K3 || COMPILE_TEST depends on OF depends on PCI_ENDPOINT select PCIE_CADENCE_EP diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 2c87e7728a65..85718246016b 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -42,18 +42,16 @@ enum link_status { }; #define J721E_MODE_RC BIT(7) -#define LANE_COUNT_MASK BIT(8) #define LANE_COUNT(n) ((n) << 8) #define GENERATION_SEL_MASK GENMASK(1, 0) -#define MAX_LANES 2 - struct j721e_pcie { struct cdns_pcie *cdns_pcie; struct clk *refclk; u32 mode; u32 num_lanes; + u32 max_lanes; void __iomem *user_cfg_base; void __iomem *intd_cfg_base; u32 linkdown_irq_regfield; @@ -71,6 +69,7 @@ struct j721e_pcie_data { unsigned int quirk_disable_flr:1; u32 linkdown_irq_regfield; unsigned int byte_access_allowed:1; + unsigned int max_lanes; }; static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) @@ -206,11 +205,15 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, { struct device *dev = pcie->cdns_pcie->dev; u32 lanes = pcie->num_lanes; + u32 mask = BIT(8); u32 val = 0; int ret; + if (pcie->max_lanes == 4) + mask = GENMASK(9, 8); + val = LANE_COUNT(lanes - 1); - ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val); + ret = regmap_update_bits(syscon, offset, mask, val); if (ret) dev_err(dev, "failed to set link count\n"); @@ -290,11 +293,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = { .quirk_retrain_flag = true, .byte_access_allowed = false, .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 2, }; static const struct j721e_pcie_data j721e_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 2, }; static const struct j721e_pcie_data j7200_pcie_rc_data = { @@ -302,23 +307,41 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = { .quirk_detect_quiet_flag = true, .linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, + .max_lanes = 2, }; static const struct j721e_pcie_data j7200_pcie_ep_data = { .mode = PCI_MODE_EP, .quirk_detect_quiet_flag = true, .quirk_disable_flr = true, + .max_lanes = 2, }; static const struct j721e_pcie_data am64_pcie_rc_data = { .mode = PCI_MODE_RC, .linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, + .max_lanes = 1, }; static const struct j721e_pcie_data am64_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = J7200_LINK_DOWN, + .max_lanes = 
1, +}; + +static const struct j721e_pcie_data j784s4_pcie_rc_data = { + .mode = PCI_MODE_RC, + .quirk_retrain_flag = true, + .byte_access_allowed = false, + .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 4, +}; + +static const struct j721e_pcie_data j784s4_pcie_ep_data = { + .mode = PCI_MODE_EP, + .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 4, }; static const struct of_device_id of_j721e_pcie_match[] = { @@ -346,6 +369,14 @@ static const struct of_device_id of_j721e_pcie_match[] = { .compatible = "ti,am64-pcie-ep", .data = &am64_pcie_ep_data, }, + { + .compatible = "ti,j784s4-pcie-host", + .data = &j784s4_pcie_rc_data, + }, + { + .compatible = "ti,j784s4-pcie-ep", + .data = &j784s4_pcie_ep_data, + }, {}, }; @@ -432,9 +463,13 @@ static int j721e_pcie_probe(struct platform_device *pdev) pcie->user_cfg_base = base; ret = of_property_read_u32(node, "num-lanes", &num_lanes); - if (ret || num_lanes > MAX_LANES) + if (ret || num_lanes > data->max_lanes) { + dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n"); num_lanes = 1; + } + pcie->num_lanes = num_lanes; + pcie->max_lanes = data->max_lanes; if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) return -EINVAL; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 3142feb8ac19..81c50dc64da9 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -360,8 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, writel(0, ep->irq_cpu_addr + offset); } -static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, - u8 intx) +static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, + u8 intx) { u16 cmd; @@ -371,7 +371,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, cdns_pcie_ep_assert_intx(ep, fn, intx, true); /* - * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() + * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq() */ mdelay(1); cdns_pcie_ep_assert_intx(ep, fn, intx, false); @@ -532,25 +532,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, } static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; struct device *dev = pcie->dev; switch (type) { - case PCI_EPC_IRQ_LEGACY: + case PCI_IRQ_INTX: if (vfn > 0) { - dev_err(dev, "Cannot raise legacy interrupts for VF\n"); + dev_err(dev, "Cannot raise INTX interrupts for VF\n"); return -EINVAL; } - return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0); + return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num); default: @@ -566,7 +565,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) struct cdns_pcie *pcie = &ep->pcie; struct device *dev = pcie->dev; int max_epfs = sizeof(epc->function_num_map) * 8; - int ret, value, epf; + int ret, epf, last_fn; + u32 reg, value; /* * BIT(0) is hardwired to 1, hence function 0 is always enabled @@ -574,6 +574,17 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) */ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map); 
+ /* + * Next function field in ARI_CAP_AND_CTR register for last function + * should be 0. + * Clearing Next Function Number field for the last function used. + */ + last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG); + reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn); + value = cdns_pcie_readl(pcie, reg); + value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK; + cdns_pcie_writel(pcie, reg, value); + if (ep->quirk_disable_flr) { for (epf = 0; epf < max_epfs; epf++) { if (!(epc->function_num_map & BIT(epf))) diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 373cb50fcd15..7a66a2f815dc 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -131,6 +131,12 @@ #define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200 /* + * Endpoint PF Registers + */ +#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000) +#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8) + +/* * Root Port Registers (PCI configuration space for the root port function) */ #define CDNS_PCIE_RP_BASE 0x00200000 @@ -347,16 +353,16 @@ struct cdns_pcie_epf { * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ * dedicated outbound regions is mapped. * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy + * the sending of a memory write (MSI) / normal message (INTX * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ * dedicated outbound region. * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. + * the MSI/INTX IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted INTX IRQs. 
* @lock: spin lock to disable interrupts while modifying PCIe controller * registers fields (RMW) accessible by both remote RC and EP to * minimize time between read and write @@ -374,7 +380,7 @@ struct cdns_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; - /* protect writing to PCI_STATUS while raising legacy interrupts */ + /* protect writing to PCI_STATUS while raising INTX interrupts */ spinlock_t lock; struct cdns_pcie_epf *epf; unsigned int quirk_detect_quiet_flag:1; diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 5ac021dbd46a..8afacc90c63b 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -336,7 +336,7 @@ config PCI_EXYNOS config PCIE_FU740 bool "SiFive FU740 PCIe controller" depends on PCI_MSI - depends on SOC_SIFIVE || COMPILE_TEST + depends on ARCH_SIFIVE || COMPILE_TEST select PCIE_DW_HOST help Say Y here if you want PCIe controller support for the SiFive diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index b445ffe95e3f..0e406677060d 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { - .host_init = dra7xx_pcie_host_init, + .init = dra7xx_pcie_host_init, }; static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) @@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) dra7xx_pcie_enable_wrapper_interrupts(dra7xx); } -static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) +static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); mdelay(1); @@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, } static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - dra7xx_pcie_raise_legacy_irq(dra7xx); + case PCI_IRQ_INTX: + dra7xx_pcie_raise_intx_irq(dra7xx); break; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); break; default: @@ -436,7 +436,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = dra7xx_pcie_ep_init, + .init = dra7xx_pcie_ep_init, .raise_irq = dra7xx_pcie_raise_irq, .get_features = dra7xx_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c6bede346932..a33fa98a252e 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -268,7 +268,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops exynos_pcie_host_ops = { - .host_init = exynos_pcie_host_init, + .init = exynos_pcie_host_init, }; static int exynos_add_pcie_port(struct exynos_pcie *ep, @@ -375,7 +375,7 @@ fail_probe: return ret; } -static int exynos_pcie_remove(struct platform_device *pdev) +static void exynos_pcie_remove(struct platform_device *pdev) { struct exynos_pcie *ep = platform_get_drvdata(pdev); @@ -385,8 +385,6 @@ static int exynos_pcie_remove(struct platform_device *pdev) phy_exit(ep->phy); exynos_pcie_deinit_clk_resources(ep); 
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies); - - return 0; } static int exynos_pcie_suspend_noirq(struct device *dev) @@ -431,7 +429,7 @@ static const struct of_device_id exynos_pcie_of_match[] = { static struct platform_driver exynos_pcie_driver = { .probe = exynos_pcie_probe, - .remove = exynos_pcie_remove, + .remove_new = exynos_pcie_remove, .driver = { .name = "exynos-pcie", .of_match_table = exynos_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 74703362aeec..99a60270b26c 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -42,6 +42,19 @@ #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 +#define IMX95_PCIE_PHY_GEN_CTRL 0x0 +#define IMX95_PCIE_REF_USE_PAD BIT(17) + +#define IMX95_PCIE_SS_RW_REG_0 0xf0 +#define IMX95_PCIE_REF_CLKEN BIT(23) +#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) + +#define IMX95_PE0_GEN_CTRL_1 0x1050 +#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) + +#define IMX95_PE0_GEN_CTRL_3 0x1058 +#define IMX95_PCIE_LTSSM_EN BIT(0) + #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) enum imx6_pcie_variants { @@ -52,14 +65,29 @@ enum imx6_pcie_variants { IMX8MQ, IMX8MM, IMX8MP, + IMX95, IMX8MQ_EP, IMX8MM_EP, IMX8MP_EP, + IMX95_EP, }; #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) #define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) +#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3) +#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4) +#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5) +#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6) +#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7) + +#define imx6_check_flag(pci, val) (pci->drvdata->flags & val) + +#define IMX6_PCIE_MAX_CLKS 6 + +#define IMX6_PCIE_MAX_INSTANCES 2 + +struct imx6_pcie; struct imx6_pcie_drvdata { enum imx6_pcie_variants variant; @@ -67,6 +95,14 @@ struct imx6_pcie_drvdata { u32 flags; int dbi_length; const char *gpr; + const char * const *clk_names; + const u32 clks_cnt; + const u32 ltssm_off; + const u32 ltssm_mask; + const u32 mode_off[IMX6_PCIE_MAX_INSTANCES]; + const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES]; + const struct pci_epc_features *epc_features; + int (*init_phy)(struct imx6_pcie *pcie); }; struct imx6_pcie { @@ -74,11 +110,7 @@ struct imx6_pcie { int reset_gpio; bool gpio_active_high; bool link_is_up; - struct clk *pcie_bus; - struct clk *pcie_phy; - struct clk *pcie_inbound_axi; - struct clk *pcie; - struct clk *pcie_aux; + struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS]; struct regmap *iomuxc_gpr; u16 msi_ctrl; u32 controller_id; @@ -165,34 +197,44 @@ static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) return imx6_pcie->controller_id == 1 ? 
IOMUXC_GPR16 : IOMUXC_GPR14; } +static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + regmap_update_bits(imx6_pcie->iomuxc_gpr, + IMX95_PCIE_SS_RW_REG_0, + IMX95_PCIE_PHY_CR_PARA_SEL, + IMX95_PCIE_PHY_CR_PARA_SEL); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, + IMX95_PCIE_PHY_GEN_CTRL, + IMX95_PCIE_REF_USE_PAD, 0); + regmap_update_bits(imx6_pcie->iomuxc_gpr, + IMX95_PCIE_SS_RW_REG_0, + IMX95_PCIE_REF_CLKEN, + IMX95_PCIE_REF_CLKEN); + + return 0; +} + static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) { - unsigned int mask, val, mode; + const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; + unsigned int mask, val, mode, id; - if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) + if (drvdata->mode == DW_PCIE_EP_TYPE) mode = PCI_EXP_TYPE_ENDPOINT; else mode = PCI_EXP_TYPE_ROOT_PORT; - switch (imx6_pcie->drvdata->variant) { - case IMX8MQ: - case IMX8MQ_EP: - if (imx6_pcie->controller_id == 1) { - mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; - val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, - mode); - } else { - mask = IMX6Q_GPR12_DEVICE_TYPE; - val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode); - } - break; - default: - mask = IMX6Q_GPR12_DEVICE_TYPE; - val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode); - break; - } + id = imx6_pcie->controller_id; + + /* If mode_mask[id] is zero, means each controller have its individual gpr */ + if (!drvdata->mode_mask[id]) + id = 0; + + mask = drvdata->mode_mask[id]; + val = mode << (ffs(mask) - 1); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); + regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); } static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) @@ -320,76 +362,66 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) return 0; } -static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie) { - switch (imx6_pcie->drvdata->variant) { - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: - /* - * The PHY initialization had been done in the PHY - * driver, break here directly. - */ - break; - case IMX8MQ: - case IMX8MQ_EP: - /* - * TODO: Currently this code assumes external - * oscillator is being used - */ + /* TODO: Currently this code assumes external oscillator is being used */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, + imx6_pcie_grp_offset(imx6_pcie), + IMX8MQ_GPR_PCIE_REF_USE_PAD, + IMX8MQ_GPR_PCIE_REF_USE_PAD); + /* + * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is + * supplied by 3.3V, the VREG_BYPASS should be cleared to zero. + */ + if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000) regmap_update_bits(imx6_pcie->iomuxc_gpr, imx6_pcie_grp_offset(imx6_pcie), - IMX8MQ_GPR_PCIE_REF_USE_PAD, - IMX8MQ_GPR_PCIE_REF_USE_PAD); - /* - * Regarding the datasheet, the PCIE_VPH is suggested - * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the - * VREG_BYPASS should be cleared to zero. 
- */ - if (imx6_pcie->vph && - regulator_get_voltage(imx6_pcie->vph) > 3000000) - regmap_update_bits(imx6_pcie->iomuxc_gpr, - imx6_pcie_grp_offset(imx6_pcie), - IMX8MQ_GPR_PCIE_VREG_BYPASS, - 0); - break; - case IMX7D: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); - break; - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_RX_EQ_MASK, - IMX6SX_GPR12_PCIE_RX_EQ_2); - fallthrough; - default: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX8MQ_GPR_PCIE_VREG_BYPASS, + 0); + + return 0; +} + +static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); + + return 0; +} + +static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); - /* configure constant input signal to the pcie ctrl and phy */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_LOS_LEVEL, 9 << 4); - - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN1, - imx6_pcie->tx_deemph_gen1 << 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, - imx6_pcie->tx_deemph_gen2_3p5db << 6); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, - imx6_pcie->tx_deemph_gen2_6db << 12); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_SWING_FULL, - imx6_pcie->tx_swing_full << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_SWING_LOW, - imx6_pcie->tx_swing_low << 25); - break; - } + /* configure constant input signal to the pcie ctrl and phy */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_LOS_LEVEL, 9 << 4); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN1, + imx6_pcie->tx_deemph_gen1 << 0); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, + imx6_pcie->tx_deemph_gen2_3p5db << 6); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, + imx6_pcie->tx_deemph_gen2_6db << 12); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_FULL, + imx6_pcie->tx_swing_full << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_LOW, + imx6_pcie->tx_swing_low << 25); + return 0; +} - imx6_pcie_configure_type(imx6_pcie); +static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); + + return imx6_pcie_init_phy(imx6_pcie); } static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) @@ -407,13 +439,18 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) { - unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); + unsigned long phy_rate = 0; int mult, div; u16 val; + int i; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) return 0; + for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++) + if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0) + phy_rate = clk_get_rate(imx6_pcie->clks[i].clk); + switch (phy_rate) { case 125000000: /* @@ -550,19 +587,11 @@ static int imx6_pcie_attach_pd(struct device *dev) static int imx6_pcie_enable_ref_clk(struct imx6_pcie 
*imx6_pcie) { - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; unsigned int offset; int ret = 0; switch (imx6_pcie->drvdata->variant) { case IMX6SX: - ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); - if (ret) { - dev_err(dev, "unable to enable pcie_axi clock\n"); - break; - } - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); break; @@ -582,6 +611,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); break; case IMX7D: + case IMX95: + case IMX95_EP: break; case IMX8MM: case IMX8MM_EP: @@ -589,12 +620,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) case IMX8MQ_EP: case IMX8MP: case IMX8MP_EP: - ret = clk_prepare_enable(imx6_pcie->pcie_aux); - if (ret) { - dev_err(dev, "unable to enable pcie_aux clock\n"); - break; - } - offset = imx6_pcie_grp_offset(imx6_pcie); /* * Set the over ride low and enabled @@ -615,9 +640,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie) { switch (imx6_pcie->drvdata->variant) { - case IMX6SX: - clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); - break; case IMX6QP: case IMX6Q: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, @@ -631,14 +653,6 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie) IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); break; - case IMX8MM: - case IMX8MM_EP: - case IMX8MQ: - case IMX8MQ_EP: - case IMX8MP: - case IMX8MP_EP: - clk_disable_unprepare(imx6_pcie->pcie_aux); - break; default: break; } @@ -650,23 +664,9 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie) struct device *dev = pci->dev; int ret; - ret = clk_prepare_enable(imx6_pcie->pcie_phy); - if (ret) { - dev_err(dev, "unable to enable pcie_phy clock\n"); + ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + if (ret) return ret; - } - - ret = clk_prepare_enable(imx6_pcie->pcie_bus); - if (ret) { - dev_err(dev, "unable to enable pcie_bus clock\n"); - goto err_pcie_bus; - } - - ret = clk_prepare_enable(imx6_pcie->pcie); - if (ret) { - dev_err(dev, "unable to enable pcie clock\n"); - goto err_pcie; - } ret = imx6_pcie_enable_ref_clk(imx6_pcie); if (ret) { @@ -679,11 +679,7 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie) return 0; err_ref_clk: - clk_disable_unprepare(imx6_pcie->pcie); -err_pcie: - clk_disable_unprepare(imx6_pcie->pcie_bus); -err_pcie_bus: - clk_disable_unprepare(imx6_pcie->pcie_phy); + clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); return ret; } @@ -691,25 +687,15 @@ err_pcie_bus: static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) { imx6_pcie_disable_ref_clk(imx6_pcie); - clk_disable_unprepare(imx6_pcie->pcie); - clk_disable_unprepare(imx6_pcie->pcie_bus); - clk_disable_unprepare(imx6_pcie->pcie_phy); + clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); } static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { + reset_control_assert(imx6_pcie->pciephy_reset); + reset_control_assert(imx6_pcie->apps_reset); + switch (imx6_pcie->drvdata->variant) { - case IMX7D: - case IMX8MQ: - case IMX8MQ_EP: - reset_control_assert(imx6_pcie->pciephy_reset); - fallthrough; - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: - reset_control_assert(imx6_pcie->apps_reset); - break; case IMX6SX: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, @@ -730,6 +716,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); break; + default: + break; } /* Some boards don't have PCIe reset GPIO. */ @@ -743,14 +731,10 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; + reset_control_deassert(imx6_pcie->pciephy_reset); + switch (imx6_pcie->drvdata->variant) { - case IMX8MQ: - case IMX8MQ_EP: - reset_control_deassert(imx6_pcie->pciephy_reset); - break; case IMX7D: - reset_control_deassert(imx6_pcie->pciephy_reset); - /* Workaround for ERR010728, failure of PCI-e PLL VCO to * oscillate, especially when cold. This turns off "Duty-cycle * Corrector" and other mysterious undocumented things. @@ -782,11 +766,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) usleep_range(200, 500); break; - case IMX6Q: /* Nothing to do */ - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: + default: break; } @@ -824,48 +804,25 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) static void imx6_pcie_ltssm_enable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; - switch (imx6_pcie->drvdata->variant) { - case IMX6Q: - case IMX6SX: - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, - IMX6Q_GPR12_PCIE_CTL_2); - break; - case IMX7D: - case IMX8MQ: - case IMX8MQ_EP: - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: - reset_control_deassert(imx6_pcie->apps_reset); - break; - } + if (drvdata->ltssm_mask) + regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, + drvdata->ltssm_mask); + + reset_control_deassert(imx6_pcie->apps_reset); } static void imx6_pcie_ltssm_disable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; - switch (imx6_pcie->drvdata->variant) { - case IMX6Q: - case IMX6SX: - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, 0); - break; - case IMX7D: - case IMX8MQ: - case IMX8MQ_EP: - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: - reset_control_assert(imx6_pcie->apps_reset); - break; - } + if (drvdata->ltssm_mask) + regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, + drvdata->ltssm_mask, 0); + + reset_control_assert(imx6_pcie->apps_reset); } static int imx6_pcie_start_link(struct dw_pcie *pci) @@ -977,7 +934,11 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) } imx6_pcie_assert_core_reset(imx6_pcie); - imx6_pcie_init_phy(imx6_pcie); + + if (imx6_pcie->drvdata->init_phy) + imx6_pcie->drvdata->init_phy(imx6_pcie); + + imx6_pcie_configure_type(imx6_pcie); ret = imx6_pcie_clk_enable(imx6_pcie); if (ret) { @@ -1039,8 +1000,8 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops imx6_pcie_host_ops = { - .host_init = imx6_pcie_host_init, - .host_deinit = imx6_pcie_host_exit, + .init = imx6_pcie_host_init, + .deinit = imx6_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { @@ -1058,17 +1019,16 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep) } static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 
interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -1082,18 +1042,39 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, - .reserved_bar = 1 << BAR_1 | 1 << BAR_3, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, .align = SZ_64K, }; +/* + * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme + * ================================================================================================ + * BAR0 | Enable | 64-bit | 1 MB | Programmable Size + * BAR1 | Disable | 32-bit | 64 KB | Fixed Size + * BAR1 should be disabled if BAR0 is 64bit. + * BAR2 | Enable | 32-bit | 1 MB | Programmable Size + * BAR3 | Enable | 32-bit | 64 KB | Programmable Size + * BAR4 | Enable | 32-bit | 1M | Programmable Size + * BAR5 | Enable | 32-bit | 64 KB | Programmable Size + */ +static const struct pci_epc_features imx95_pcie_epc_features = { + .msi_capable = true, + .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, + .align = SZ_4K, +}; + static const struct pci_epc_features* imx6_pcie_ep_get_features(struct dw_pcie_ep *ep) { - return &imx8m_pcie_epc_features; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + + return imx6_pcie->drvdata->epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = imx6_pcie_ep_init, + .init = imx6_pcie_ep_init, .raise_irq = imx6_pcie_ep_raise_irq, .get_features = imx6_pcie_ep_get_features, }; @@ -1104,7 +1085,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, int ret; unsigned int pcie_dbi2_offset; struct dw_pcie_ep *ep; - struct resource *res; struct dw_pcie *pci = imx6_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; @@ -1123,14 +1103,20 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, pcie_dbi2_offset = SZ_4K; break; } + pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); - if (!res) - return -EINVAL; - ep->phys_base = res->start; - ep->addr_size = resource_size(res); - ep->page_size = SZ_64K; + /* + * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining + * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC + * core code can fetch that from DT. But once all platform DTs were fixed, this and the + * above "dbi_base2" setting should be removed. 
+ */ + if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) + pci->dbi_base2 = NULL; + + if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT)) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ret = dw_pcie_ep_init(ep); if (ret) { @@ -1252,6 +1238,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; int ret; u16 val; + int i; imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); if (!imx6_pcie) @@ -1305,81 +1292,48 @@ static int imx6_pcie_probe(struct platform_device *pdev) return imx6_pcie->reset_gpio; } - /* Fetch clocks */ - imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); - if (IS_ERR(imx6_pcie->pcie_bus)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus), - "pcie_bus clock source missing or invalid\n"); + if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS) + return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); - imx6_pcie->pcie = devm_clk_get(dev, "pcie"); - if (IS_ERR(imx6_pcie->pcie)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie), - "pcie clock source missing or invalid\n"); + for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++) + imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i]; - switch (imx6_pcie->drvdata->variant) { - case IMX6SX: - imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, - "pcie_inbound_axi"); - if (IS_ERR(imx6_pcie->pcie_inbound_axi)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi), - "pcie_inbound_axi clock missing or invalid\n"); - break; - case IMX8MQ: - case IMX8MQ_EP: - imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); - if (IS_ERR(imx6_pcie->pcie_aux)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), - "pcie_aux clock source missing or invalid\n"); - fallthrough; - case IMX7D: - if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) - imx6_pcie->controller_id = 1; + /* Fetch clocks */ + ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + if (ret) + return ret; - imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, - "pciephy"); - if (IS_ERR(imx6_pcie->pciephy_reset)) { - dev_err(dev, "Failed to get PCIEPHY reset control\n"); - return PTR_ERR(imx6_pcie->pciephy_reset); - } + if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) { + imx6_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(imx6_pcie->phy)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy), + "failed to get pcie phy\n"); + } - imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, - "apps"); - if (IS_ERR(imx6_pcie->apps_reset)) { - dev_err(dev, "Failed to get PCIE APPS reset control\n"); - return PTR_ERR(imx6_pcie->apps_reset); - } - break; - case IMX8MM: - case IMX8MM_EP: - case IMX8MP: - case IMX8MP_EP: - imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); - if (IS_ERR(imx6_pcie->pcie_aux)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), - "pcie_aux clock source missing or invalid\n"); - imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, - "apps"); + if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) { + imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); if (IS_ERR(imx6_pcie->apps_reset)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset), "failed to get pcie apps reset control\n"); + } - imx6_pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(imx6_pcie->phy)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy), - "failed to get pcie phy\n"); + if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) { + imx6_pcie->pciephy_reset = 
devm_reset_control_get_exclusive(dev, "pciephy"); + if (IS_ERR(imx6_pcie->pciephy_reset)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset), + "Failed to get PCIEPHY reset control\n"); + } + switch (imx6_pcie->drvdata->variant) { + case IMX8MQ: + case IMX8MQ_EP: + case IMX7D: + if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) + imx6_pcie->controller_id = 1; break; default: break; } - /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */ - if (imx6_pcie->phy == NULL) { - imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); - if (IS_ERR(imx6_pcie->pcie_phy)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy), - "pcie_phy clock source missing or invalid\n"); - } - /* Grab turnoff reset */ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); @@ -1388,12 +1342,32 @@ static int imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx6_pcie->turnoff_reset); } + if (imx6_pcie->drvdata->gpr) { /* Grab GPR config register range */ - imx6_pcie->iomuxc_gpr = - syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr); - if (IS_ERR(imx6_pcie->iomuxc_gpr)) { - dev_err(dev, "unable to find iomuxc registers\n"); - return PTR_ERR(imx6_pcie->iomuxc_gpr); + imx6_pcie->iomuxc_gpr = + syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr); + if (IS_ERR(imx6_pcie->iomuxc_gpr)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr), + "unable to find iomuxc registers\n"); + } + + if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) { + void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); + + if (IS_ERR(off)) + return dev_err_probe(dev, PTR_ERR(off), + "unable to find serdes registers\n"); + + static const struct regmap_config regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + }; + + imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); + if (IS_ERR(imx6_pcie->iomuxc_gpr)) + return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr), + "unable to find iomuxc registers\n"); } /* Grab PCIe PHY Tx Settings */ @@ -1470,6 +1444,11 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) imx6_pcie_assert_core_reset(imx6_pcie); } +static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; +static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; +static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; +static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; + static const struct imx6_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, @@ -1477,6 +1456,13 @@ static const struct imx6_pcie_drvdata drvdata[] = { IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", + .clk_names = imx6q_clks, + .clks_cnt = ARRAY_SIZE(imx6q_clks), + .ltssm_off = IOMUXC_GPR12, + .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .init_phy = imx6_pcie_init_phy, }, [IMX6SX] = { .variant = IMX6SX, @@ -1484,6 +1470,13 @@ static const struct imx6_pcie_drvdata drvdata[] = { IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", + .clk_names = imx6sx_clks, + .clks_cnt = ARRAY_SIZE(imx6sx_clks), + .ltssm_off = IOMUXC_GPR12, + .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .init_phy = imx6sx_pcie_init_phy, }, [IMX6QP] = { .variant = IMX6QP, @@ -1492,40 +1485,122 @@ static const struct 
imx6_pcie_drvdata drvdata[] = { IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", + .clk_names = imx6q_clks, + .clks_cnt = ARRAY_SIZE(imx6q_clks), + .ltssm_off = IOMUXC_GPR12, + .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .init_phy = imx6_pcie_init_phy, }, [IMX7D] = { .variant = IMX7D, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX6_PCIE_FLAG_HAS_APP_RESET | + IMX6_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx7d-iomuxc-gpr", + .clk_names = imx6q_clks, + .clks_cnt = ARRAY_SIZE(imx6q_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .init_phy = imx7d_pcie_init_phy, }, [IMX8MQ] = { .variant = IMX8MQ, + .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | + IMX6_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx8mq-iomuxc-gpr", + .clk_names = imx8mq_clks, + .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .mode_off[1] = IOMUXC_GPR12, + .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, + .init_phy = imx8mq_pcie_init_phy, }, [IMX8MM] = { .variant = IMX8MM, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX6_PCIE_FLAG_HAS_PHYDRV | + IMX6_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mm-iomuxc-gpr", + .clk_names = imx8mm_clks, + .clks_cnt = ARRAY_SIZE(imx8mm_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, }, [IMX8MP] = { .variant = IMX8MP, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX6_PCIE_FLAG_HAS_PHYDRV | + IMX6_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mp-iomuxc-gpr", + .clk_names = imx8mm_clks, + .clks_cnt = ARRAY_SIZE(imx8mm_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + }, + [IMX95] = { + .variant = IMX95, + .flags = IMX6_PCIE_FLAG_HAS_SERDES, + .clk_names = imx8mq_clks, + .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .ltssm_off = IMX95_PE0_GEN_CTRL_3, + .ltssm_mask = IMX95_PCIE_LTSSM_EN, + .mode_off[0] = IMX95_PE0_GEN_CTRL_1, + .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, + .init_phy = imx95_pcie_init_phy, }, [IMX8MQ_EP] = { .variant = IMX8MQ_EP, + .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | + IMX6_PCIE_FLAG_HAS_PHY_RESET, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mq-iomuxc-gpr", + .clk_names = imx8mq_clks, + .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .mode_off[1] = IOMUXC_GPR12, + .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, + .epc_features = &imx8m_pcie_epc_features, + .init_phy = imx8mq_pcie_init_phy, }, [IMX8MM_EP] = { .variant = IMX8MM_EP, + .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", + .clk_names = imx8mm_clks, + .clks_cnt = ARRAY_SIZE(imx8mm_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .epc_features = &imx8m_pcie_epc_features, }, [IMX8MP_EP] = { .variant = IMX8MP_EP, + .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", + .clk_names = imx8mm_clks, + .clks_cnt = ARRAY_SIZE(imx8mm_clks), + .mode_off[0] = IOMUXC_GPR12, + .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .epc_features = &imx8m_pcie_epc_features, + }, + [IMX95_EP] = { + .variant = IMX95_EP, + .flags = IMX6_PCIE_FLAG_HAS_SERDES | + IMX6_PCIE_FLAG_SUPPORT_64BIT, + .clk_names = imx8mq_clks, + .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .ltssm_off = IMX95_PE0_GEN_CTRL_3, + 
.ltssm_mask = IMX95_PCIE_LTSSM_EN, + .mode_off[0] = IMX95_PE0_GEN_CTRL_1, + .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, + .init_phy = imx95_pcie_init_phy, + .epc_features = &imx95_pcie_epc_features, + .mode = DW_PCIE_EP_TYPE, }, }; @@ -1537,9 +1612,11 @@ static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], }, + { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], }, { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, + { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], }, {}, }; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 0def919f89fa..844de4418724 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -115,8 +115,7 @@ struct keystone_pcie { struct dw_pcie *pci; /* PCI Device ID */ u32 device_id; - int legacy_host_irqs[PCI_NUM_INTX]; - struct device_node *legacy_intc_np; + int intx_host_irqs[PCI_NUM_INTX]; int msi_host_irq; int num_lanes; @@ -124,7 +123,7 @@ struct keystone_pcie { struct phy **phy; struct device_link **link; struct device_node *msi_intc_np; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct device_node *np; /* Application register space */ @@ -252,8 +251,8 @@ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp) return dw_pcie_allocate_domains(pp); } -static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, - int offset) +static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie, + int offset) { struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; @@ -263,7 +262,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, if (BIT(0) & pending) { dev_dbg(dev, ": irq: irq_offset %d", offset); - generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset); + generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset); } /* EOI the INTx interrupt */ @@ -307,38 +306,37 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) return IRQ_HANDLED; } -static void ks_pcie_ack_legacy_irq(struct irq_data *d) +static void ks_pcie_ack_intx_irq(struct irq_data *d) { } -static void ks_pcie_mask_legacy_irq(struct irq_data *d) +static void ks_pcie_mask_intx_irq(struct irq_data *d) { } -static void ks_pcie_unmask_legacy_irq(struct irq_data *d) +static void ks_pcie_unmask_intx_irq(struct irq_data *d) { } -static struct irq_chip ks_pcie_legacy_irq_chip = { - .name = "Keystone-PCI-Legacy-IRQ", - .irq_ack = ks_pcie_ack_legacy_irq, - .irq_mask = ks_pcie_mask_legacy_irq, - .irq_unmask = ks_pcie_unmask_legacy_irq, +static struct irq_chip ks_pcie_intx_irq_chip = { + .name = "Keystone-PCI-INTX-IRQ", + .irq_ack = ks_pcie_ack_intx_irq, + .irq_mask = ks_pcie_mask_intx_irq, + .irq_unmask = ks_pcie_unmask_intx_irq, }; -static int ks_pcie_init_legacy_irq_map(struct irq_domain *d, - unsigned int irq, - irq_hw_number_t hw_irq) +static int ks_pcie_init_intx_irq_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hw_irq) { - irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip, + irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, d->host_data); return 0; } -static const struct 
irq_domain_ops ks_pcie_legacy_irq_domain_ops = { - .map = ks_pcie_init_legacy_irq_map, +static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = { + .map = ks_pcie_init_intx_irq_map, .xlate = irq_domain_xlate_onetwocell, }; @@ -605,22 +603,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) } /** - * ks_pcie_legacy_irq_handler() - Handle legacy interrupt + * ks_pcie_intx_irq_handler() - Handle INTX interrupt * @desc: Pointer to irq descriptor * - * Traverse through pending legacy interrupts and invoke handler for each. Also + * Traverse through pending INTX interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. */ -static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) +static void ks_pcie_intx_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; - u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; + u32 irq_offset = irq - ks_pcie->intx_host_irqs[0]; struct irq_chip *chip = irq_desc_get_chip(desc); - dev_dbg(dev, ": Handling legacy irq %d\n", irq); + dev_dbg(dev, ": Handling INTX irq %d\n", irq); /* * The chained irq handler installation would have replaced normal @@ -628,7 +626,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) * ack operation. */ chained_irq_enter(chip, desc); - ks_pcie_handle_legacy_irq(ks_pcie, irq_offset); + ks_pcie_handle_intx_irq(ks_pcie, irq_offset); chained_irq_exit(chip, desc); } @@ -686,10 +684,10 @@ err: return ret; } -static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) +static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie) { struct device *dev = ks_pcie->pci->dev; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct device_node *np = ks_pcie->np; struct device_node *intc_np; int irq_count, irq, ret = 0, i; @@ -697,7 +695,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); if (!intc_np) { /* - * Since legacy interrupts are modeled as edge-interrupts in + * Since INTX interrupts are modeled as edge-interrupts in * AM6, keep it disabled for now. 
*/ if (ks_pcie->is_am6) @@ -719,22 +717,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) ret = -EINVAL; goto err; } - ks_pcie->legacy_host_irqs[i] = irq; + ks_pcie->intx_host_irqs[i] = irq; irq_set_chained_handler_and_data(irq, - ks_pcie_legacy_irq_handler, + ks_pcie_intx_irq_handler, ks_pcie); } - legacy_irq_domain = - irq_domain_add_linear(intc_np, PCI_NUM_INTX, - &ks_pcie_legacy_irq_domain_ops, NULL); - if (!legacy_irq_domain) { - dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX, + &ks_pcie_intx_irq_domain_ops, NULL); + if (!intx_irq_domain) { + dev_err(dev, "Failed to add irq domain for INTX irqs\n"); ret = -EINVAL; goto err; } - ks_pcie->legacy_irq_domain = legacy_irq_domain; + ks_pcie->intx_irq_domain = intx_irq_domain; for (i = 0; i < PCI_NUM_INTX; i++) ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); @@ -808,7 +805,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) if (!ks_pcie->is_am6) pp->bridge->child_ops = &ks_child_pcie_ops; - ret = ks_pcie_config_legacy_irq(ks_pcie); + ret = ks_pcie_config_intx_irq(ks_pcie); if (ret) return ret; @@ -838,12 +835,12 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops ks_pcie_host_ops = { - .host_init = ks_pcie_host_init, - .msi_host_init = ks_pcie_msi_host_init, + .init = ks_pcie_host_init, + .msi_init = ks_pcie_msi_host_init, }; static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { - .host_init = ks_pcie_host_init, + .init = ks_pcie_host_init, }; static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) @@ -881,7 +878,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); } -static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) +static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie) { struct dw_pcie *pci = ks_pcie->pci; u8 int_pin; @@ -900,20 +897,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) } static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - ks_pcie_am654_raise_legacy_irq(ks_pcie); + case PCI_IRQ_INTX: + ks_pcie_am654_raise_intx_irq(ks_pcie); break; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); break; - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); break; default: @@ -928,12 +924,12 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, - .reserved_bar = 1 << BAR_0 | 1 << BAR_1, - .bar_fixed_64bit = 1 << BAR_0, - .bar_fixed_size[2] = SZ_1M, - .bar_fixed_size[3] = SZ_64K, - .bar_fixed_size[4] = 256, - .bar_fixed_size[5] = SZ_1M, + .bar[BAR_0] = { .type = BAR_RESERVED, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, }, + .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, .align = SZ_1M, }; @@ -944,7 +940,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops 
ks_pcie_am654_ep_ops = { - .ep_init = ks_pcie_am654_ep_init, + .init = ks_pcie_am654_ep_init, .raise_irq = ks_pcie_am654_raise_irq, .get_features = &ks_pcie_am654_get_features, }; @@ -1218,7 +1214,16 @@ static int ks_pcie_probe(struct platform_device *pdev) goto err_link; } + /* Obtain references to the PHYs */ + for (i = 0; i < num_lanes; i++) + phy_pm_runtime_get_sync(ks_pcie->phy[i]); + ret = ks_pcie_enable_phy(ks_pcie); + + /* Release references to the PHYs */ + for (i = 0; i < num_lanes; i++) + phy_pm_runtime_put_sync(ks_pcie->phy[i]); + if (ret) { dev_err(dev, "failed to enable phy\n"); goto err_link; @@ -1302,7 +1307,7 @@ err_link: return ret; } -static int ks_pcie_remove(struct platform_device *pdev) +static void ks_pcie_remove(struct platform_device *pdev) { struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); struct device_link **link = ks_pcie->link; @@ -1314,13 +1319,11 @@ static int ks_pcie_remove(struct platform_device *pdev) ks_pcie_disable_phy(ks_pcie); while (num_lanes--) device_link_del(link[num_lanes]); - - return 0; } static struct platform_driver ks_pcie_driver = { .probe = ks_pcie_probe, - .remove = ks_pcie_remove, + .remove_new = ks_pcie_remove, .driver = { .name = "keystone-pcie", .of_match_table = ks_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 3d3c50ef4b6f..1f6ee1460ec2 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -49,7 +49,7 @@ struct ls_pcie_ep { bool big_endian; }; -static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset) +static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset) { struct dw_pcie *pci = pcie->pci; @@ -59,7 +59,7 @@ static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset) return ioread32(pci->dbi_base + offset); } -static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value) +static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value) { struct dw_pcie *pci = pcie->pci; @@ -76,8 +76,8 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id) u32 val, cfg; u8 offset; - val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR); - ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val); + val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val); if (!val) return IRQ_NONE; @@ -96,9 +96,9 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id) dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap); dw_pcie_dbi_ro_wr_dis(pci); - cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG); + cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG); cfg |= PEX_PF0_CFG_READY; - ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg); dw_pcie_ep_linkup(&pci->ep); dev_dbg(pci->dev, "Link up\n"); @@ -130,10 +130,10 @@ static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie, } /* Enable interrupts */ - val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER); + val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER); val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE | PEX_PF0_PME_MES_IER_LUDIE; - ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val); return 0; } @@ -166,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep) } static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = 
to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no, interrupt_num); default: @@ -184,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } } -static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, - u8 func_no) +static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); @@ -195,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, } static const struct dw_pcie_ep_ops ls_pcie_ep_ops = { - .ep_init = ls_pcie_ep_init, + .init = ls_pcie_ep_init, .raise_irq = ls_pcie_ep_raise_irq, .get_features = ls_pcie_ep_get_features, - .func_conf_select = ls_pcie_ep_func_conf_select, + .get_dbi_offset = ls_pcie_ep_get_dbi_offset, }; static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = { @@ -251,7 +250,10 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = pcie->drvdata->dw_pcie_ops; - ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4); + ls_epc->bar[BAR_2].only_64bit = true; + ls_epc->bar[BAR_3].type = BAR_RESERVED; + ls_epc->bar[BAR_4].only_64bit = true; + ls_epc->bar[BAR_5].type = BAR_RESERVED; ls_epc->linkup_notifier = true; pcie->pci = pci; diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 37956e09c65b..ee6f52568133 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -35,21 +35,41 @@ #define PF_MCR_PTOMR BIT(0) #define PF_MCR_EXL2S BIT(1) +/* LS1021A PEXn PM Write Control Register */ +#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64) +#define PMXMTTURNOFF BIT(31) +#define SCFG_PEXSFTRSTCR 0x190 +#define PEXSR(idx) BIT(idx) + +/* LS1043A PEX PME control register */ +#define SCFG_PEXPMECR 0x144 +#define PEXPME(idx) BIT(31 - (idx) * 4) + +/* LS1043A PEX LUT debug register */ +#define LS_PCIE_LDBG 0x7fc +#define LDBG_SR BIT(30) +#define LDBG_WE BIT(31) + #define PCIE_IATU_NUM 6 struct ls_pcie_drvdata { - const u32 pf_off; + const u32 pf_lut_off; + const struct dw_pcie_host_ops *ops; + int (*exit_from_l2)(struct dw_pcie_rp *pp); + bool scfg_support; bool pm_support; }; struct ls_pcie { struct dw_pcie *pci; const struct ls_pcie_drvdata *drvdata; - void __iomem *pf_base; + void __iomem *pf_lut_base; + struct regmap *scfg; + int index; bool big_endian; }; -#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr) +#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr) #define to_ls_pcie(x) dev_get_drvdata((x)->dev) static bool ls_pcie_is_bridge(struct ls_pcie *pcie) @@ -90,20 +110,20 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie) iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); } -static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off) +static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off) { if (pcie->big_endian) - return ioread32be(pcie->pf_base + off); + return ioread32be(pcie->pf_lut_base + off); - return ioread32(pcie->pf_base + off); + return ioread32(pcie->pf_lut_base + off); } -static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val) +static 
void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val) { if (pcie->big_endian) - iowrite32be(val, pcie->pf_base + off); + iowrite32be(val, pcie->pf_lut_base + off); else - iowrite32(val, pcie->pf_base + off); + iowrite32(val, pcie->pf_lut_base + off); } static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) @@ -113,11 +133,11 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) u32 val; int ret; - val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_PTOMR; - ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); + ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val); - ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, + ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_PTOMR), PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); @@ -125,7 +145,7 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) dev_err(pcie->pci->dev, "PME_Turn_off timeout\n"); } -static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) +static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); @@ -136,20 +156,22 @@ static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link * to exit L2 state. */ - val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_EXL2S; - ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); + ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val); /* * L2 exit timeout of 10ms is not defined in the specifications, * it was chosen based on empirical observations. */ - ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, + ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_EXL2S), 1000, 10000); if (ret) dev_err(pcie->pci->dev, "L2 exit timeout\n"); + + return ret; } static int ls_pcie_host_init(struct dw_pcie_rp *pp) @@ -168,25 +190,130 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp) return 0; } +static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask) +{ + /* Send PME_Turn_Off message */ + regmap_write_bits(scfg, reg, mask, mask); + + /* + * There is no specific register to check for PME_To_Ack from endpoint. + * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US. + */ + mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000); + + /* + * Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit + * to complete the PME_Turn_Off handshake. 
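[Editor's aside, not part of the patch] Since these SoCs expose no PME_To_Ack status bit, the driver can only assert the turn-off bit, wait out the timeout, and de-assert it. A minimal sketch of that set-wait-clear handshake, assuming a regmap handle obtained elsewhere; EXAMPLE_PME_REG and EXAMPLE_PME_MASK are made-up placeholders, while regmap_write_bits() and mdelay() are the real kernel APIs the patch uses:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#define EXAMPLE_PME_REG		0x5c	/* hypothetical SCFG offset */
#define EXAMPLE_PME_MASK	BIT(31)	/* hypothetical turn-off bit */

static void example_send_turnoff(struct regmap *scfg)
{
	/* Assert the PME_Turn_Off request bit. */
	regmap_write_bits(scfg, EXAMPLE_PME_REG, EXAMPLE_PME_MASK,
			  EXAMPLE_PME_MASK);

	/* No ack bit to poll, so wait out the handshake window. */
	mdelay(10);

	/* De-assert the bit to complete the handshake. */
	regmap_write_bits(scfg, EXAMPLE_PME_REG, EXAMPLE_PME_MASK, 0);
}

Using regmap_write_bits() rather than a raw write keeps the read-modify-write atomic with respect to other users of the shared SCFG block.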
+ */ + regmap_write_bits(scfg, reg, mask, 0); +} + +static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF); +} + +static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask) +{ + /* Reset the PEX wrapper to bring the link out of L2 */ + regmap_write_bits(scfg, reg, mask, mask); + regmap_write_bits(scfg, reg, mask, 0); + + return 0; +} + +static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index)); +} + +static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index)); +} + +static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + u32 val; + + /* + * Reset the PEX wrapper to bring the link out of L2. + * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for both setting and + * clearing the soft reset on the PEX module. + * LDBG_SR: When SR is set to 1, the PEX module enters soft reset. + */ + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val |= LDBG_WE; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val |= LDBG_SR; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val &= ~LDBG_SR; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val &= ~LDBG_WE; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + return 0; +} + static const struct dw_pcie_host_ops ls_pcie_host_ops = { - .host_init = ls_pcie_host_init, + .init = ls_pcie_host_init, .pme_turn_off = ls_pcie_send_turnoff_msg, }; +static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = { + .init = ls_pcie_host_init, + .pme_turn_off = ls1021a_pcie_send_turnoff_msg, +}; + static const struct ls_pcie_drvdata ls1021a_drvdata = { - .pm_support = false, + .pm_support = true, + .scfg_support = true, + .ops = &ls1021a_pcie_host_ops, + .exit_from_l2 = ls1021a_pcie_exit_from_l2, +}; + +static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = { + .init = ls_pcie_host_init, + .pme_turn_off = ls1043a_pcie_send_turnoff_msg, +}; + +static const struct ls_pcie_drvdata ls1043a_drvdata = { + .pf_lut_off = 0x10000, + .pm_support = true, + .scfg_support = true, + .ops = &ls1043a_pcie_host_ops, + .exit_from_l2 = ls1043a_pcie_exit_from_l2, }; static const struct ls_pcie_drvdata layerscape_drvdata = { - .pf_off = 0xc0000, + .pf_lut_off = 0xc0000, .pm_support = true, + .ops = &ls_pcie_host_ops, + .exit_from_l2 = ls_pcie_exit_from_l2, }; static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata }, { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata }, { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2080a-pcie", .data = 
&layerscape_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata }, @@ -201,6 +328,8 @@ static int ls_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct ls_pcie *pcie; struct resource *dbi_base; + u32 index[2]; + int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -213,9 +342,8 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->drvdata = of_device_get_match_data(dev); pci->dev = dev; - pci->pp.ops = &ls_pcie_host_ops; - pcie->pci = pci; + pci->pp.ops = pcie->drvdata->ops; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); @@ -224,7 +352,21 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian"); - pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off; + pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off; + + if (pcie->drvdata->scfg_support) { + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + dev_err(dev, "No syscfg phandle specified\n"); + return PTR_ERR(pcie->scfg); + } + + ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2); + if (ret) + return ret; + + pcie->index = index[1]; + } if (!ls_pcie_is_bridge(pcie)) return -ENODEV; @@ -247,11 +389,14 @@ static int ls_pcie_suspend_noirq(struct device *dev) static int ls_pcie_resume_noirq(struct device *dev) { struct ls_pcie *pcie = dev_get_drvdata(dev); + int ret; if (!pcie->drvdata->pm_support) return 0; - ls_pcie_exit_from_l2(&pcie->pci->pp); + ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp); + if (ret) + return ret; return dw_pcie_resume_noirq(pcie->pci); } diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 407558f5d74a..6477c83262c2 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -389,7 +389,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops meson_pcie_host_ops = { - .host_init = meson_pcie_host_init, + .init = meson_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index b8cb77c9c4bd..6dfdda59f328 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -311,7 +311,7 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops al_pcie_host_ops = { - .host_init = al_pcie_host_init, + .init = al_pcie_host_init, }; static int al_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 5c999e15c357..b5c599ccaacf 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) } static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { - .host_init = armada8k_pcie_host_init, + .init = armada8k_pcie_host_init, }; static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 9b572a2b2c9a..9ed0a9ba7619 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp) } static 
const struct dw_pcie_host_ops artpec6_pcie_host_ops = { - .host_init = artpec6_pcie_host_init, + .init = artpec6_pcie_host_init, }; static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) @@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) } static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); + case PCI_IRQ_INTX: + dev_err(pci->dev, "EP cannot trigger INTx IRQs\n"); return -EINVAL; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -370,7 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = artpec6_pcie_ep_init, + .init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, }; diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c index 17e696797ff5..76d0ddea8007 100644 --- a/drivers/pci/controller/dwc/pcie-bt1.c +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops bt1_pcie_host_ops = { - .host_init = bt1_pcie_host_init, - .host_deinit = bt1_pcie_host_deinit, + .init = bt1_pcie_host_init, + .deinit = bt1_pcie_host_deinit, }; static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index f6207989fc6a..746a11dcb67f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -6,6 +6,7 @@ * Author: Kishon Vijay Abraham I <kishon@ti.com> */ +#include <linux/align.h> #include <linux/bitfield.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -43,46 +44,19 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) return NULL; } -static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no) -{ - unsigned int func_offset = 0; - - if (ep->ops->func_conf_select) - func_offset = ep->ops->func_conf_select(ep, func_no); - - return func_offset; -} - -static unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, u8 func_no) -{ - unsigned int dbi2_offset = 0; - - if (ep->ops->get_dbi2_offset) - dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no); - else if (ep->ops->func_conf_select) /* for backward compatibility */ - dbi2_offset = ep->ops->func_conf_select(ep, func_no); - - return dbi2_offset; -} - static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no, enum pci_barno bar, int flags) { - unsigned int func_offset, dbi2_offset; struct dw_pcie_ep *ep = &pci->ep; - u32 reg, reg_dbi2; - - func_offset = dw_pcie_ep_func_select(ep, func_no); - dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); + u32 reg; - reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar); - reg_dbi2 = dbi2_offset + PCI_BASE_ADDRESS_0 + (4 * bar); + reg = PCI_BASE_ADDRESS_0 + (4 * bar); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writel_dbi2(pci, reg_dbi2, 0x0); - dw_pcie_writel_dbi(pci, reg, 0x0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0); + dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, 0x0); - 
dw_pcie_writel_dbi(pci, reg + 4, 0x0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0); } dw_pcie_dbi_ro_wr_dis(pci); } @@ -99,19 +73,15 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar); static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, - u8 cap_ptr, u8 cap) + u8 cap_ptr, u8 cap) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; u8 cap_id, next_cap_ptr; u16 reg; if (!cap_ptr) return 0; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr); + reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr); cap_id = (reg & 0x00ff); if (cap_id > PCI_CAP_ID_MAX) @@ -126,14 +96,10 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; u8 next_cap_ptr; u16 reg; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST); + reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST); next_cap_ptr = (reg & 0x00ff); return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); @@ -144,24 +110,21 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; - - func_offset = dw_pcie_ep_func_select(ep, func_no); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid); - dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid); - dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid); - dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code); - dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE, - hdr->subclass_code | hdr->baseclass_code << 8); - dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE, - hdr->cache_line_size); - dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID, - hdr->subsys_vendor_id); - dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id); - dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN, - hdr->interrupt_pin); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE, + hdr->subclass_code | hdr->baseclass_code << 8); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE, + hdr->cache_line_size); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID, + hdr->subsys_vendor_id); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN, + hdr->interrupt_pin); dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -243,18 +206,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset, dbi2_offset; enum pci_barno bar = epf_bar->barno; size_t size = epf_bar->size; int flags = epf_bar->flags; - u32 reg, reg_dbi2; int ret, type; + u32 reg; - func_offset = dw_pcie_ep_func_select(ep, func_no); - 
dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); - - reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset; - reg_dbi2 = PCI_BASE_ADDRESS_0 + (4 * bar) + dbi2_offset; + reg = PCI_BASE_ADDRESS_0 + (4 * bar); if (!(flags & PCI_BASE_ADDRESS_SPACE)) type = PCIE_ATU_TYPE_MEM; @@ -270,12 +228,12 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writel_dbi2(pci, reg_dbi2, lower_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg, flags); + dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, upper_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg + 4, 0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); } ep->epf_bar[bar] = epf_bar; @@ -335,19 +293,15 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); if (!(val & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; @@ -361,22 +315,19 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSI_FLAGS_QMASK; val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, reg, val); + dw_pcie_ep_writew_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -385,19 +336,15 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); if (!(val & PCI_MSIX_FLAGS_ENABLE)) return -EINVAL; @@ -411,9 +358,8 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; 
struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) @@ -421,21 +367,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_dbi_ro_wr_en(pci); - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; dw_pcie_writew_dbi(pci, reg, val); - reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; + reg = ep_func->msix_cap + PCI_MSIX_TABLE; val = offset | bir; - dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_ep_writel_dbi(ep, func_no, reg, val); - reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA; + reg = ep_func->msix_cap + PCI_MSIX_PBA; val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; - dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_ep_writel_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); @@ -443,7 +387,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, } static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); @@ -496,56 +440,53 @@ static const struct pci_epc_ops epc_ops = { .get_features = dw_pcie_ep_get_features, }; -int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; - dev_err(dev, "EP cannot trigger legacy IRQs\n"); + dev_err(dev, "EP cannot raise INTX IRQs\n"); return -EINVAL; } -EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq); +EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 msg_addr_lower, msg_addr_upper, reg; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; unsigned int aligned_offset; - unsigned int func_offset = 0; u16 msg_ctrl, msg_data; - u32 msg_addr_lower, msg_addr_upper, reg; - u64 msg_addr; bool has_upper; + u64 msg_addr; int ret; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. 
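[Editor's aside, not part of the patch] A condensed sketch of the address handling the hunk below reworks: the doorbell address read from the MSI capability is now aligned down to the outbound window's page size with ALIGN_DOWN() instead of hand-masking only the low bits. The function and parameter names here are stand-ins, and the dw_pcie_ep_map_addr() call is elided:

#include <linux/align.h>
#include <linux/io.h>

static void example_raise_msi(struct dw_pcie_ep *ep, u32 msg_addr_lower,
			      u32 msg_addr_upper, u16 msg_data,
			      size_t page_size)
{
	/* Compose the full 64-bit doorbell address first... */
	u64 msg_addr = ((u64)msg_addr_upper << 32) | msg_addr_lower;
	/* ...and remember where it falls inside one window page. */
	unsigned int aligned_offset = msg_addr & (page_size - 1);

	/* Map the outbound window at the page-aligned base (call elided). */
	msg_addr = ALIGN_DOWN(msg_addr, page_size);

	/* Trigger the MSI at the offset inside the mapped window. */
	writel(msg_data, ep->msi_mem + aligned_offset);
}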
*/ - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - msg_ctrl = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg); has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); - reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO; - msg_addr_lower = dw_pcie_readl_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO; + msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg); if (has_upper) { - reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI; - msg_addr_upper = dw_pcie_readl_dbi(pci, reg); - reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64; - msg_data = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI; + msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg); + reg = ep_func->msi_cap + PCI_MSI_DATA_64; + msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } else { msg_addr_upper = 0; - reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32; - msg_data = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_DATA_32; + msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } - aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); - msg_addr = ((u64)msg_addr_upper) << 32 | - (msg_addr_lower & ~aligned_offset); + msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; + + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) @@ -582,10 +523,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct dw_pcie_ep_func *ep_func; struct pci_epf_msix_tbl *msix_tbl; + struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; - unsigned int func_offset = 0; u32 reg, msg_data, vec_ctrl; unsigned int aligned_offset; u32 tbl_offset; @@ -597,10 +537,8 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, if (!ep_func || !ep_func->msix_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; - tbl_offset = dw_pcie_readl_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_TABLE; + tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg); bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset); tbl_offset &= PCI_MSIX_TABLE_OFFSET; @@ -615,6 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) @@ -690,8 +629,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + /* + * PCIe r6.0, sec 7.8.6.2 require us to support at least one + * size in the range from 1 MB to 512 GB. Advertise support + * for 1 MB BAR size only. 
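[Editor's aside, not part of the patch] For readers decoding the BIT(4) above: in the Resizable BAR capability, each bit n of PCI_REBAR_CAP advertises support for a 2^(n+16)-byte BAR, so bit 4 is exactly the 1 MB minimum the comment cites. A small illustrative helper; example_rebar_cap_bit() is made up, not part of the patch:

#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/sizes.h>

/* Map a power-of-two BAR size to its PCI_REBAR_CAP bit. */
static u32 example_rebar_cap_bit(u64 size)
{
	return BIT(order_base_2(size) - 16);	/* SZ_1M -> BIT(4) */
}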
+ */ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); } /* @@ -794,8 +738,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) list_add_tail(&ep_func->list, &ep->func_list); } - if (ep->ops->ep_init) - ep->ops->ep_init(ep); + if (ep->ops->init) + ep->ops->init(ep); ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 7991f0e179b2..d15a5c2d5b48 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -328,7 +328,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); - u64 *msi_vaddr; + u64 *msi_vaddr = NULL; int ret; u32 ctrl, num_ctrls; @@ -379,15 +379,20 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) * memory. */ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); - if (ret) - dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n"); + if (!ret) + msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data, + GFP_KERNEL); - msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data, - GFP_KERNEL); if (!msi_vaddr) { - dev_err(dev, "Failed to alloc and map MSI data\n"); - dw_pcie_free_msi(pp); - return -ENOMEM; + dev_warn(dev, "Failed to allocate 32-bit MSI address\n"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data, + GFP_KERNEL); + if (!msi_vaddr) { + dev_err(dev, "Failed to allocate MSI address\n"); + dw_pcie_free_msi(pp); + return -ENOMEM; + } } return 0; @@ -441,14 +446,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; - if (pp->ops->host_init) { - ret = pp->ops->host_init(pp); + if (pp->ops->init) { + ret = pp->ops->init(pp); if (ret) return ret; } if (pci_msi_enabled()) { - pp->has_msi_ctrl = !(pp->ops->msi_host_init || + pp->has_msi_ctrl = !(pp->ops->msi_init || of_property_read_bool(np, "msi-parent") || of_property_read_bool(np, "msi-map")); @@ -464,8 +469,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) goto err_deinit_host; } - if (pp->ops->msi_host_init) { - ret = pp->ops->msi_host_init(pp); + if (pp->ops->msi_init) { + ret = pp->ops->msi_init(pp); if (ret < 0) goto err_deinit_host; } else if (pp->has_msi_ctrl) { @@ -502,8 +507,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (ret) goto err_stop_link; - if (pp->ops->host_post_init) - pp->ops->host_post_init(pp); + if (pp->ops->post_init) + pp->ops->post_init(pp); return 0; @@ -518,8 +523,8 @@ err_free_msi: dw_pcie_free_msi(pp); err_deinit_host: - if (pp->ops->host_deinit) - pp->ops->host_deinit(pp); + if (pp->ops->deinit) + pp->ops->deinit(pp); return ret; } @@ -539,8 +544,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp) if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); - if (pp->ops->host_deinit) - pp->ops->host_deinit(pp); + if (pp->ops->deinit) + pp->ops->deinit(pp); } EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); @@ -842,8 +847,8 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) return ret; } - if (pci->pp.ops->host_deinit) - pci->pp.ops->host_deinit(&pci->pp); + if (pci->pp.ops->deinit) + pci->pp.ops->deinit(&pci->pp); pci->suspended = true; @@ -860,8 +865,8 @@ int 
dw_pcie_resume_noirq(struct dw_pcie *pci) pci->suspended = false; - if (pci->pp.ops->host_init) { - ret = pci->pp.ops->host_init(&pci->pp); + if (pci->pp.ops->init) { + ret = pci->pp.ops->init(&pci->pp); if (ret) { dev_err(pci->dev, "Host init failed: %d\n", ret); return ret; diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index b625841e98aa..778588b4be70 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) } static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -74,7 +73,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = dw_plat_pcie_ep_init, + .init = dw_plat_pcie_ep_init, .raise_irq = dw_plat_pcie_ep_raise_irq, .get_features = dw_plat_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 55ff76e3d384..26dae4837462 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -300,10 +300,10 @@ enum dw_pcie_ltssm { }; struct dw_pcie_host_ops { - int (*host_init)(struct dw_pcie_rp *pp); - void (*host_deinit)(struct dw_pcie_rp *pp); - void (*host_post_init)(struct dw_pcie_rp *pp); - int (*msi_host_init)(struct dw_pcie_rp *pp); + int (*init)(struct dw_pcie_rp *pp); + void (*deinit)(struct dw_pcie_rp *pp); + void (*post_init)(struct dw_pcie_rp *pp); + int (*msi_init)(struct dw_pcie_rp *pp); void (*pme_turn_off)(struct dw_pcie_rp *pp); }; @@ -332,10 +332,10 @@ struct dw_pcie_rp { struct dw_pcie_ep_ops { void (*pre_init)(struct dw_pcie_ep *ep); - void (*ep_init)(struct dw_pcie_ep *ep); + void (*init)(struct dw_pcie_ep *ep); void (*deinit)(struct dw_pcie_ep *ep); int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num); + unsigned int type, u16 interrupt_num); const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep); /* * Provide a method to implement the different func config space @@ -344,7 +344,7 @@ struct dw_pcie_ep_ops { * return a 0, and implement code in callback function of platform * driver. 
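[Editor's aside, not part of the patch] With func_conf_select renamed to get_dbi_offset in the header below, a multi-function platform only has to return its per-function stride; the new dw_pcie_ep_{read,write}*_dbi() helpers fold the offset into every access. A sketch under that assumption, where EXAMPLE_FUNC_DBI_STRIDE is an illustrative constant (R-Car Gen4 later in this patch does the same with its real one):

#include <linux/sizes.h>

#define EXAMPLE_FUNC_DBI_STRIDE	SZ_128K	/* made-up per-function stride */

static unsigned int example_ep_get_dbi_offset(struct dw_pcie_ep *ep,
					      u8 func_no)
{
	return func_no * EXAMPLE_FUNC_DBI_STRIDE;
}

/*
 * Callers then stop adding offsets by hand, e.g.
 *   val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
 * instead of dw_pcie_readw_dbi(pci, func_offset + reg).
 */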
*/ - unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); + unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no); unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no); }; @@ -486,6 +486,99 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) dw_pcie_write_dbi2(pci, reg, 0x4, val); } +static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, + u8 func_no) +{ + unsigned int dbi_offset = 0; + + if (ep->ops->get_dbi_offset) + dbi_offset = ep->ops->get_dbi_offset(ep, func_no); + + return dbi_offset; +} + +static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size) +{ + unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + return dw_pcie_read_dbi(pci, offset + reg, size); +} + +static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size, u32 val) +{ + unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_write_dbi(pci, offset + reg, size, val); +} + +static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u32 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val); +} + +static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4); +} + +static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u16 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val); +} + +static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2); +} + +static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u8 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val); +} + +static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1); +} + +static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, + u8 func_no) +{ + unsigned int dbi2_offset = 0; + + if (ep->ops->get_dbi2_offset) + dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no); + else if (ep->ops->get_dbi_offset) /* for backward compatibility */ + dbi2_offset = ep->ops->get_dbi_offset(ep, func_no); + + return dbi2_offset; +} + +static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size, u32 val) +{ + unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_write_dbi2(pci, offset + reg, size, val); +} + +static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u32 val) +{ + dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val); +} + static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) { u32 reg; @@ -580,7 +673,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep); int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep); void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep); void dw_pcie_ep_exit(struct dw_pcie_ep *ep); -int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no); +int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num); int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -613,7 +706,7 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { } -static inline int 
dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) { return 0; } diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 2fe42c70097f..d6842141d384 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -72,7 +72,7 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, writel_relaxed(val, rockchip->apb_base + reg); } -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +static void rockchip_pcie_intx_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); @@ -202,7 +202,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) if (ret < 0) dev_err(dev, "failed to init irq domain\n"); - irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, + irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler, rockchip); /* LTSSM enable control mode */ @@ -215,7 +215,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops rockchip_pcie_host_ops = { - .host_init = rockchip_pcie_host_init, + .init = rockchip_pcie_host_init, }; static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip) diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c index 1e9b44b8bba4..66367252032b 100644 --- a/drivers/pci/controller/dwc/pcie-fu740.c +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops fu740_pcie_host_ops = { - .host_init = fu740_pcie_host_init, + .init = fu740_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index fd484cc7c481..7a11c618b9d9 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -198,7 +198,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops histb_pcie_host_ops = { - .host_init = histb_pcie_host_init, + .init = histb_pcie_host_init, }; static void histb_pcie_host_disable(struct histb_pcie *hipcie) diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index c9c93524e01d..acbe4f6d3291 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -391,7 +391,7 @@ static const struct dw_pcie_ops intel_pcie_ops = { }; static const struct dw_pcie_host_ops intel_pcie_dw_ops = { - .host_init = intel_pcie_rc_init, + .init = intel_pcie_rc_init, }; static int intel_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index 289bff99d762..5e8e54f597dd 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -289,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep) } static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - /* Legacy interrupts are not supported in Keem Bay */ - dev_err(pci->dev, "Legacy IRQ is not 
supported\n"); + case PCI_IRQ_INTX: + /* INTx interrupts are not supported in Keem Bay */ + dev_err(pci->dev, "INTx IRQ is not supported\n"); return -EINVAL; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type %d\n", type); @@ -313,8 +312,12 @@ static const struct pci_epc_features keembay_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, - .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5), - .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), + .bar[BAR_0] = { .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .only_64bit = true, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .only_64bit = true, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_16K, }; @@ -325,7 +328,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = { - .ep_init = keembay_pcie_ep_init, + .init = keembay_pcie_ep_init, .raise_irq = keembay_pcie_ep_raise_irq, .get_features = keembay_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 2ee146767971..d5523f302102 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -366,7 +366,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; - char name[32]; int ret, i; /* This is an optional property */ @@ -387,9 +386,8 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, if (pcie->gpio_id_clkreq[i] < 0) return pcie->gpio_id_clkreq[i]; - sprintf(name, "pcie_clkreq_%d", i); - pcie->clkreq_names[i] = devm_kstrdup_const(dev, name, - GFP_KERNEL); + pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL, + "pcie_clkreq_%d", i); if (!pcie->clkreq_names[i]) return -ENOMEM; } @@ -404,7 +402,6 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, struct device *dev = &pdev->dev; struct device_node *parent, *child; int ret, slot, i; - char name[32]; for_each_available_child_of_node(node, parent) { for_each_available_child_of_node(parent, child) { @@ -430,9 +427,9 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, slot = PCI_SLOT(ret); - sprintf(name, "pcie_perst_%d", slot); - pcie->reset_names[i] = devm_kstrdup_const(dev, name, - GFP_KERNEL); + pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL, + "pcie_perst_%d", + slot); if (!pcie->reset_names[i]) { ret = -ENOMEM; goto put_node; @@ -672,7 +669,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = { }; static const struct dw_pcie_host_ops kirin_pcie_host_ops = { - .host_init = kirin_pcie_host_init, + .init = kirin_pcie_host_init, }; static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie) @@ -741,15 +738,13 @@ err: return ret; } -static int kirin_pcie_remove(struct platform_device *pdev) +static void kirin_pcie_remove(struct platform_device *pdev) { struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev); dw_pcie_host_deinit(&kirin_pcie->pci->pp); kirin_pcie_power_off(kirin_pcie); - - return 0; } struct kirin_pcie_data { @@ -818,7 +813,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) static struct platform_driver kirin_pcie_driver = { .probe = kirin_pcie_probe, - .remove = kirin_pcie_remove, + .remove_new = kirin_pcie_remove, 
.driver = { .name = "kirin-pcie", .of_match_table = kirin_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 9e58f055199a..36e5e80cd22f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -726,14 +726,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, } static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type\n"); @@ -796,7 +796,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pci_ep_ops = { - .ep_init = qcom_pcie_ep_init, + .init = qcom_pcie_ep_init, .raise_irq = qcom_pcie_ep_raise_irq, .get_features = qcom_pcie_epc_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 6902e97719d1..14772edcf0d3 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -53,6 +53,7 @@ #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 0x2000 +#define PARF_BDF_TO_SID_CFG 0x2c00 /* ELBI registers */ #define ELBI_SYS_CTRL 0x04 @@ -120,6 +121,9 @@ /* PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 +/* PARF_BDF_TO_SID_CFG fields */ +#define BDF_TO_SID_BYPASS BIT(0) + /* ELBI_SYS_CTRL register fields */ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) @@ -229,6 +233,7 @@ struct qcom_pcie_ops { struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; + bool no_l0s; }; struct qcom_pcie { @@ -272,6 +277,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci) return 0; } +static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci) +{ + struct qcom_pcie *pcie = to_qcom_pcie(pci); + u16 offset; + u32 val; + + if (!pcie->cfg->no_l0s) + return; + + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + + dw_pcie_dbi_ro_wr_en(pci); + + val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_ASPM_L0S; + writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); + + dw_pcie_dbi_ro_wr_dis(pci); +} + static void qcom_pcie_clear_hpc(struct dw_pcie *pci) { u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); @@ -961,6 +986,7 @@ err_disable_regulators: static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) { + qcom_pcie_clear_aspm_l0s(pcie->pci); qcom_pcie_clear_hpc(pcie->pci); return 0; @@ -968,9 +994,12 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) { - /* Downstream devices need to be in D0 state before enabling PCI PM substates */ - pci_set_power_state(pdev, PCI_D0); - pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL); + /* + * Downstream devices need to be in D0 state before enabling PCI PM + * substates. 
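[Editor's aside, not part of the patch] The callback below runs from pci_walk_bus(), which already holds pci_bus_sem; the plain pci_set_power_state() and pci_enable_link_state() helpers would try to take that semaphore again and deadlock, hence the switch to the _locked variants. A sketch of the calling pattern, with illustrative example_* names:

/* Illustrative only: the walk-under-lock pattern this patch adopts. */
static int example_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/* pci_bus_sem is held by pci_walk_bus(), so use _locked APIs. */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;	/* non-zero would abort the walk */
}

static void example_host_post_init(struct dw_pcie_rp *pp)
{
	pci_walk_bus(pp->bridge->bus, example_enable_aspm, NULL);
}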
+ */ + pci_set_power_state_locked(pdev, PCI_D0); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); return 0; } @@ -1005,11 +1034,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; int i, nr_map, size = 0; u32 smmu_sid_base; + u32 val; of_get_property(dev->of_node, "iommu-map", &size); if (!size) return 0; + /* Enable BDF to SID translation by disabling bypass mode (default) */ + val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); + val &= ~BDF_TO_SID_BYPASS; + writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); + map = kzalloc(size, GFP_KERNEL); if (!map) return -ENOMEM; @@ -1244,9 +1279,9 @@ static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { - .host_init = qcom_pcie_host_init, - .host_deinit = qcom_pcie_host_deinit, - .host_post_init = qcom_pcie_host_post_init, + .init = qcom_pcie_host_init, + .deinit = qcom_pcie_host_deinit, + .post_init = qcom_pcie_host_post_init, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ @@ -1355,6 +1390,11 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = { .ops = &ops_2_9_0, }; +static const struct qcom_pcie_cfg cfg_sc8280xp = { + .ops = &ops_1_9_0, + .no_l0s = true, +}; + static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, .start_link = qcom_pcie_start_link, @@ -1626,11 +1666,11 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, - { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0}, { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, - { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp }, { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, @@ -1639,6 +1679,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 }, { } }; diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 3bc45e513b3d..0be760ed420b 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -8,7 +8,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -307,8 +307,8 @@ static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = { - .host_init = rcar_gen4_pcie_host_init, - .host_deinit = rcar_gen4_pcie_host_deinit, + .init = rcar_gen4_pcie_host_init, + .deinit = rcar_gen4_pcie_host_deinit, }; static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar) @@ -362,15 +362,14 @@ static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep) } static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep 
*ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *dw = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(dw->dev, "Unknown IRQ type\n"); @@ -384,7 +383,9 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, - .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_1M, }; @@ -394,7 +395,7 @@ rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep) return &rcar_gen4_pcie_epc_features; } -static unsigned int rcar_gen4_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, +static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no) { return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET; @@ -408,11 +409,11 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, static const struct dw_pcie_ep_ops pcie_ep_ops = { .pre_init = rcar_gen4_pcie_ep_pre_init, - .ep_init = rcar_gen4_pcie_ep_init, + .init = rcar_gen4_pcie_ep_init, .deinit = rcar_gen4_pcie_ep_deinit, .raise_irq = rcar_gen4_pcie_ep_raise_irq, .get_features = rcar_gen4_pcie_ep_get_features, - .func_conf_select = rcar_gen4_pcie_ep_func_conf_select, + .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset, .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset, }; @@ -436,7 +437,7 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar) /* Common */ static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar) { - rcar->mode = (enum dw_pcie_device_mode)of_device_get_match_data(&rcar->pdev->dev); + rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev); switch (rcar->mode) { case DW_PCIE_RC_TYPE: diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 99d47ae80331..201dced209f0 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -148,7 +148,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { - .host_init = spear13xx_pcie_host_init, + .init = spear13xx_pcie_host_init, }; static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 0fe113598ebb..1f7b662cb8e1 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -773,13 +773,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp) val_w); } -static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp) +static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; - /* Enable legacy interrupt generation */ + /* Enable INTX interrupt generation */ val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; val |= APPL_INTR_EN_L0_0_INT_INT_EN; @@ -830,7 +830,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp) appl_writel(pcie, 0xFFFFFFFF, 
APPL_INTR_STATUS_L1_17); tegra_pcie_enable_system_interrupts(pp); - tegra_pcie_enable_legacy_interrupts(pp); + tegra_pcie_enable_intx_interrupts(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi_interrupts(pp); } @@ -1060,7 +1060,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = { }; static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { - .host_init = tegra_pcie_dw_host_init, + .init = tegra_pcie_dw_host_init, }; static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) @@ -1947,7 +1947,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) +static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ if (irq > 1) @@ -1979,20 +1979,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) } static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); + case PCI_IRQ_INTX: + return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); default: @@ -2008,9 +2007,13 @@ static const struct pci_epc_features tegra_pcie_epc_features = { .core_init_notifier = true, .msi_capable = false, .msix_capable = false, - .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, - .bar_fixed_64bit = 1 << BAR_0, - .bar_fixed_size[0] = SZ_1M, + .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, + .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .type = BAR_RESERVED, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_RESERVED, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, }; static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c index cba3c88fcf39..639bc2e12476 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep) dw_pcie_ep_reset_bar(pci, bar); } -static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep) +static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); @@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, } static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return uniphier_pcie_ep_raise_legacy_irq(ep); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return uniphier_pcie_ep_raise_intx_irq(ep); + case PCI_IRQ_MSI: return uniphier_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: @@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = { - .ep_init = 
uniphier_pcie_ep_init, + .init = uniphier_pcie_ep_init, .raise_irq = uniphier_pcie_ep_raise_irq, .get_features = uniphier_pcie_get_features, }; @@ -412,8 +411,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = { .msi_capable = true, .msix_capable = false, .align = 1 << 16, - .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), - .reserved_bar = BIT(BAR_4), + .bar[BAR_0] = { .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .only_64bit = true, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_RESERVED, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, }, }; @@ -426,7 +429,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = { .msi_capable = true, .msix_capable = false, .align = 1 << 12, - .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), + .bar[BAR_0] = { .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .only_64bit = true, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .only_64bit = true, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, }, }; diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index 48c3eba817b4..5757ca3803c9 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -67,7 +67,7 @@ struct uniphier_pcie { struct clk *clk; struct reset_control *rst; struct phy *phy; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; }; #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) @@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val); for_each_set_bit(bit, &reg, PCI_NUM_INTX) - generic_handle_domain_irq(pcie->legacy_irq_domain, bit); + generic_handle_domain_irq(pcie->intx_irq_domain, bit); chained_irq_exit(chip, desc); } -static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp) +static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); @@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp) goto out_put_node; } - pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); - if (!pcie->legacy_irq_domain) { + if (!pcie->intx_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); ret = -ENODEV; goto out_put_node; @@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) struct uniphier_pcie *pcie = to_uniphier_pcie(pci); int ret; - ret = uniphier_pcie_config_legacy_irq(pp); + ret = uniphier_pcie_config_intx_irq(pp); if (ret) return ret; @@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops uniphier_pcie_host_ops = { - .host_init = uniphier_pcie_host_init, + .init = uniphier_pcie_host_init, }; static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie) diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 71026fefa366..318c278e65c8 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops visconti_pcie_host_ops = { - .host_init = visconti_pcie_host_init, + .init =
visconti_pcie_host_init, }; static int visconti_get_resources(struct platform_device *pdev, diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 6be3266cd7b5..45b71806182d 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -85,7 +85,7 @@ int pci_host_common_probe(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(pci_host_common_probe); -int pci_host_common_remove(struct platform_device *pdev) +void pci_host_common_remove(struct platform_device *pdev) { struct pci_host_bridge *bridge = platform_get_drvdata(pdev); @@ -93,8 +93,6 @@ int pci_host_common_remove(struct platform_device *pdev) pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); - - return 0; } EXPORT_SYMBOL_GPL(pci_host_common_remove); diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index 63865aeb636b..41cb6a057f6e 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = { .of_match_table = gen_pci_of_match, }, .probe = pci_host_common_probe, - .remove = pci_host_common_remove, + .remove_new = pci_host_common_remove, }; module_platform_driver(gen_pci_driver); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 30c7dfeccb16..5992280e8110 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -49,6 +49,7 @@ #include <linux/refcount.h> #include <linux/irqdomain.h> #include <linux/acpi.h> +#include <linux/sizes.h> #include <asm/mshyperv.h> /* @@ -465,7 +466,7 @@ struct pci_eject_response { u32 status; } __packed; -static int pci_ring_size = (4 * PAGE_SIZE); +static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K); /* * Driver specific state. @@ -650,13 +651,6 @@ static void hv_arch_irq_unmask(struct irq_data *data) PCI_FUNC(pdev->devfn); params->int_target.vector = hv_msi_get_int_vector(data); - /* - * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by - * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a - * spurious interrupt storm. Not doing so does not seem to have a - * negative effect (yet?). - */ - if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { /* * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index d45e7b8dc530..8b34ccff073a 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +/* + * Some Loongson PCIe ports have hardware limitations on their Maximum Read + * Request Size. They can't handle anything larger than this. Sane + * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for + * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly, + * so we have to enforce maximum safe MRRS, which is 256 bytes. 
+ */ +#ifdef CONFIG_MIPS +static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev) +{ + struct pci_bus *bus = pdev->bus; + struct pci_dev *bridge; + static const struct pci_device_id bridge_devids[] = { + { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) }, + { 0, }, + }; + + /* look for the matching bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + + if (pci_match_id(bridge_devids, bridge)) { + if (pcie_get_readrq(pdev) > 256) { + pci_info(pdev, "limiting MRRS to 256\n"); + pcie_set_readrq(pdev, 256); + } + break; + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk); +#endif + static void loongson_mrrs_quirk(struct pci_dev *pdev) { - /* - * Some Loongson PCIe ports have h/w limitations of maximum read - * request size. They can't handle anything larger than this. So - * force this limit on any devices attached under these ports. - */ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); bridge->no_inc_mrrs = 1; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index f9dd6622fe10..c08683febdd4 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -48,6 +48,9 @@ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 +#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 +#define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8 + #define PCIE_RC_DL_MDIO_ADDR 0x1100 #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 @@ -121,9 +124,12 @@ #define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2 +#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 #define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000 - +#define PCIE_CLKREQ_MASK \ + (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \ + PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK) #define PCIE_INTR2_CPU_BASE 0x4300 #define PCIE_MSI_INTR2_BASE 0x4500 @@ -330,7 +336,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port, readl(base + PCIE_RC_DL_MDIO_ADDR); writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA); - err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data, + err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data, MDIO_WT_DONE(data), 10, 100); return err; } @@ -1028,13 +1034,89 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) return 0; } +/* + * This extends the timeout period for an access to an internal bus. This + * access timeout may occur during L1SS sleep periods, even without the + * presence of a PCIe access. 
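A worked check of the unit comment in brcm_extend_rbus_timeout() just below: the register ticks 216,000,000 times per second, i.e. 216 ticks per microsecond, so the 4-second L1SS setting programs 216 * 4,000,000 = 864,000,000 units. For brcm_config_clkreq() that follows, the mode is taken from the optional brcm,clkreq-mode devicetree property: an absent property keeps the initial "default", an unrecognized string (or a read error other than -EINVAL) falls back to "safe", and only "no-l1ss", "default" and "safe" are recognized values.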
+ */ +static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie) +{ + /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */ + const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8; + u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */ + + /* Each unit in timeout register is 1/216,000,000 seconds */ + writel(216 * timeout_us, pcie->base + REG_OFFSET); +} + +static void brcm_config_clkreq(struct brcm_pcie *pcie) +{ + static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n"; + const char *mode = "default"; + u32 clkreq_cntl; + int ret, tmp; + + ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode); + if (ret && ret != -EINVAL) { + dev_err(pcie->dev, err_msg); + mode = "safe"; + } + + /* Start out assuming safe mode (both mode bits cleared) */ + clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + clkreq_cntl &= ~PCIE_CLKREQ_MASK; + + if (strcmp(mode, "no-l1ss") == 0) { + /* + * "no-l1ss" -- Provides Clock Power Management, L0s, and + * L1, but cannot provide L1 substate (L1SS) power + * savings. If the downstream device connected to the RC is + * L1SS capable AND the OS enables L1SS, all PCIe traffic + * may abruptly halt, potentially hanging the system. + */ + clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; + /* + * We want to un-advertise L1 substates because if the OS + * tries to configure the controller into using L1 substate + * power savings it may fail or hang when the RC HW is in + * "no-l1ss" mode. + */ + tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP); + u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK); + writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP); + + } else if (strcmp(mode, "default") == 0) { + /* + * "default" -- Provides L0s, L1, and L1SS, but not + * compliant to provide Clock Power Management; + * specifically, may not be able to meet the Tclron max + * timing of 400ns as specified in "Dynamic Clock Control", + * section 3.2.5.2.2 of the PCIe spec. This situation is + * atypical and should happen only with older devices. + */ + clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK; + brcm_extend_rbus_timeout(pcie); + + } else { + /* + * "safe" -- No power savings; refclk is driven by RC + * unconditionally. + */ + if (strcmp(mode, "safe") != 0) + dev_err(pcie->dev, err_msg); + mode = "safe"; + } + writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + + dev_info(pcie->dev, "clkreq-mode set to %s\n", mode); +} + static int brcm_pcie_start_link(struct brcm_pcie *pcie) { struct device *dev = pcie->dev; void __iomem *base = pcie->base; u16 nlw, cls, lnksta; bool ssc_good = false; - u32 tmp; int ret, i; /* Unassert the fundamental reset */ @@ -1059,6 +1141,8 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) return -ENODEV; } + brcm_config_clkreq(pcie); + if (pcie->gen) brcm_pcie_set_gen(pcie, pcie->gen); @@ -1077,14 +1161,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) pci_speed_string(pcie_link_speed[cls]), nlw, ssc_good ? "(SSC)" : "(!SSC)"); - /* - * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1 - * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. 
- */ - tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); - tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; - writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); - return 0; } diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index acdc583d2980..4e6aa882a567 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; - pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); + pcie->type = (uintptr_t)of_device_get_match_data(dev); ret = of_address_to_resource(np, 0, ®); if (ret < 0) { diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index e0e27645fdf4..975b3024fb08 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, resource_size_t cpu_addr, resource_size_t pci_addr, resource_size_t size, - unsigned long type, int num) + unsigned long type, int *num) { + resource_size_t remaining = size; + resource_size_t table_size; + resource_size_t addr_align; + const char *range_type; void __iomem *table; u32 val; - if (num >= PCIE_MAX_TRANS_TABLES) { - dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", - (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); - return -ENODEV; - } + while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) { + /* Table size needs to be a power of 2 */ + table_size = BIT(fls(remaining) - 1); + + if (cpu_addr > 0) { + addr_align = BIT(ffs(cpu_addr) - 1); + table_size = min(table_size, addr_align); + } + + /* Minimum size of translate table is 4KiB */ + if (table_size < 0x1000) { + dev_err(pcie->dev, "illegal table size %#llx\n", + (unsigned long long)table_size); + return -EINVAL; + } - table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + - num * PCIE_ATR_TLB_SET_OFFSET; + table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET; + writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table); + writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); + writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); + writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); - writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), - table); - writel_relaxed(upper_32_bits(cpu_addr), - table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); - writel_relaxed(lower_32_bits(pci_addr), - table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); - writel_relaxed(upper_32_bits(pci_addr), - table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); + if (type == IORESOURCE_IO) { + val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; + range_type = "IO"; + } else { + val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; + range_type = "MEM"; + } - if (type == IORESOURCE_IO) - val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; - else - val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; + writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); - writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); + dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", + range_type, *num, (unsigned long long)cpu_addr, + (unsigned long long)pci_addr, (unsigned long long)table_size); + + cpu_addr += table_size; + pci_addr += 
table_size; + remaining -= table_size; + (*num)++; + } + + if (remaining) + dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", + (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); return 0; } @@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) resource_size_t cpu_addr; resource_size_t pci_addr; resource_size_t size; - const char *range_type; - if (type == IORESOURCE_IO) { + if (type == IORESOURCE_IO) cpu_addr = pci_pio_to_address(res->start); - range_type = "IO"; - } else if (type == IORESOURCE_MEM) { + else if (type == IORESOURCE_MEM) cpu_addr = res->start; - range_type = "MEM"; - } else { + else continue; - } pci_addr = res->start - entry->offset; size = resource_size(res); err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, - type, table_index); + type, &table_index); if (err) return err; - - dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", - range_type, table_index, (unsigned long long)cpu_addr, - (unsigned long long)pci_addr, (unsigned long long)size); - - table_index++; } return 0; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 66a8f73296fc..48372013f26d 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc) if (status & MSI_STATUS){ unsigned long imsi_status; + /* + * The interrupt status can be cleared even if the + * MSI status remains pending. As such, given the + * edge-triggered interrupt type, its status should + * be cleared before being dispatched to the + * handler of the underlying device. + */ + writel(MSI_STATUS, port->base + PCIE_INT_STATUS); while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) generic_handle_domain_irq(port->inner_domain, bit); } - /* Clear MSI interrupt status */ - writel(MSI_STATUS, port->base + PCIE_INT_STATUS); } } diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index 7034c0ff23d0..05967c6c0b42 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie, } static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); switch (type) { - case PCI_EPC_IRQ_LEGACY: + case PCI_IRQ_INTX: return rcar_pcie_ep_assert_intx(ep, fn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num); default: @@ -441,11 +440,15 @@ static const struct pci_epc_features rcar_pcie_epc_features = { .msi_capable = true, .msix_capable = false, /* use 64-bit BARs so mark BAR[1,3,5] as reserved */ - .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5, - .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4, - .bar_fixed_size[0] = 128, - .bar_fixed_size[2] = 256, - .bar_fixed_size[4] = 256, + .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128, + .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256, + .only_64bit = true, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, + .only_64bit = true, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, }; 
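These endpoint-driver conversions replace the old reserved_bar, bar_fixed_size and bar_fixed_64bit bitmasks with one descriptor per BAR. A minimal sketch of the new style for a hypothetical endpoint driver (name, sizes and capabilities illustrative), modeled on the hunks above:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>

/*
 * Hypothetical device: one fixed 1 MiB BAR that must be 64-bit; a
 * 64-bit BAR consumes the following BAR register, so BAR 1 is reserved.
 */
static const struct pci_epc_features foo_epc_features = {
	.msi_capable	= true,
	.msix_capable	= false,
	.bar[BAR_0]	= { .type = BAR_FIXED, .fixed_size = SZ_1M,
			    .only_64bit = true, },
	.bar[BAR_1]	= { .type = BAR_RESERVED, },
	.align		= SZ_4K,
};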
static const struct pci_epc_features* diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index bf7cc0b6a695..996077ab7cfd 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -29,6 +29,7 @@ #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> #include "pcie-rcar.h" @@ -953,14 +954,22 @@ static const struct of_device_id rcar_pcie_of_match[] = { {}, }; +/* Design note 346 from Linear Technology says order is not important. */ +static const char * const rcar_pcie_supplies[] = { + "vpcie1v5", + "vpcie3v3", + "vpcie12v", +}; + static int rcar_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; struct rcar_pcie_host *host; struct rcar_pcie *pcie; + unsigned int i; u32 data; int err; - struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); if (!bridge) @@ -971,6 +980,13 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = dev; platform_set_drvdata(pdev, host); + for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) { + err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]); + if (err < 0 && err != -ENODEV) + return dev_err_probe(dev, err, "failed to enable regulator: %s\n", + rcar_pcie_supplies[i]); + } + pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 0af0e965fb57..c9046e97a1d2 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -26,16 +26,16 @@ * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ * dedicated outbound regions is mapped. * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy + * the sending of a memory write (MSI) / normal message (INTX * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ * dedicated outbound region. * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. + * the MSI/INTX IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted INTX IRQs. 
*/ struct rockchip_pcie_ep { struct rockchip_pcie rockchip; @@ -325,8 +325,8 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, } } -static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, - u8 intx) +static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn, + u8 intx) { u16 cmd; @@ -407,15 +407,14 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, } static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return rockchip_pcie_ep_send_intx_irq(ep, fn, 0); + case PCI_IRQ_MSI: return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); default: return -EINVAL; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index afbbdccd195d..300b9dc85ecc 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -505,7 +505,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +static void rockchip_pcie_intx_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); @@ -553,7 +553,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) return irq; irq_set_chained_handler_and_data(irq, - rockchip_pcie_legacy_int_handler, + rockchip_pcie_intx_handler, rockchip); irq = platform_get_irq_byname(pdev, "client"); diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index 2f7d676c683c..5be5dfd8398f 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -576,7 +576,7 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port) &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return PTR_ERR(port->intx_domain); + return -ENOMEM; } irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); @@ -635,14 +635,14 @@ static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port) err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow, IRQF_SHARED | IRQF_NO_THREAD, NULL, port); if (err) { - dev_err(dev, "Failed to request INTx IRQ %d\n", irq); + dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq); return err; } err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow, IRQF_SHARED | IRQF_NO_THREAD, NULL, port); if (err) { - dev_err(dev, "Failed to request event IRQ %d\n", irq); + dev_err(dev, "Failed to request event IRQ %d\n", port->irq); return err; } @@ -684,10 +684,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port) int ret; port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0"); - if (port->msi.irq_msi0 <= 0) { - dev_err(dev, "Unable to find msi0 IRQ line\n"); + if (port->msi.irq_msi0 <= 0) return port->msi.irq_msi0; - } ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low, IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl", @@ -698,10 +696,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port) } port->msi.irq_msi1 = 
platform_get_irq_byname(pdev, "msi1"); - if (port->msi.irq_msi1 <= 0) { - dev_err(dev, "Unable to find msi1 IRQ line\n"); + if (port->msi.irq_msi1 <= 0) return port->msi.irq_msi1; - } ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high, IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl", diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index e307aceba5c9..0408f4d612b5 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -166,7 +166,7 @@ struct nwl_pcie { int irq_intx; int irq_misc; struct nwl_msi msi; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct clk *clk; raw_spinlock_t leg_mask_lock; }; @@ -324,7 +324,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc) while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL) != 0) { for_each_set_bit(bit, &status, PCI_NUM_INTX) - generic_handle_domain_irq(pcie->legacy_irq_domain, bit); + generic_handle_domain_irq(pcie->intx_irq_domain, bit); } chained_irq_exit(chip, desc); @@ -364,7 +364,7 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void nwl_mask_leg_irq(struct irq_data *data) +static void nwl_mask_intx_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; @@ -378,7 +378,7 @@ static void nwl_mask_leg_irq(struct irq_data *data) raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } -static void nwl_unmask_leg_irq(struct irq_data *data) +static void nwl_unmask_intx_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; @@ -392,26 +392,26 @@ static void nwl_unmask_leg_irq(struct irq_data *data) raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } -static struct irq_chip nwl_leg_irq_chip = { +static struct irq_chip nwl_intx_irq_chip = { .name = "nwl_pcie:legacy", - .irq_enable = nwl_unmask_leg_irq, - .irq_disable = nwl_mask_leg_irq, - .irq_mask = nwl_mask_leg_irq, - .irq_unmask = nwl_unmask_leg_irq, + .irq_enable = nwl_unmask_intx_irq, + .irq_disable = nwl_mask_intx_irq, + .irq_mask = nwl_mask_intx_irq, + .irq_unmask = nwl_unmask_intx_irq, }; -static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) +static int nwl_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); + irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_status_flags(irq, IRQ_LEVEL); return 0; } -static const struct irq_domain_ops legacy_domain_ops = { - .map = nwl_legacy_map, +static const struct irq_domain_ops intx_domain_ops = { + .map = nwl_intx_map, .xlate = pci_irqd_intx_xlate, }; @@ -525,20 +525,20 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node; - struct device_node *legacy_intc_node; + struct device_node *intc_node; - legacy_intc_node = of_get_next_child(node, NULL); - if (!legacy_intc_node) { + intc_node = of_get_next_child(node, NULL); + if (!intc_node) { dev_err(dev, "No legacy intc node found\n"); return -EINVAL; } - pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, - PCI_NUM_INTX, - &legacy_domain_ops, - pcie); - of_node_put(legacy_intc_node); - if (!pcie->legacy_irq_domain) { + pcie->intx_irq_domain = 
irq_domain_add_linear(intc_node, + PCI_NUM_INTX, + &intx_domain_ops, + pcie); + of_node_put(intc_node); + if (!pcie->intx_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } @@ -710,14 +710,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) /* Enable all misc interrupts */ nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); - /* Disable all legacy interrupts */ + /* Disable all INTX interrupts */ nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); - /* Clear pending legacy interrupts */ + /* Clear pending INTX interrupts */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); - /* Enable all legacy interrupts */ + /* Enable all INTX interrupts */ nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); /* Enable the bridge config interrupt */ diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 94ba61fe1c44..87b7856f375a 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -751,7 +751,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata) if (!(features & VMD_FEAT_BIOS_PM_QUIRK)) return 0; - pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR); if (!pos) @@ -984,7 +984,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENOMEM; vmd->dev = dev; - vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL); + vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL); if (vmd->instance < 0) return vmd->instance; @@ -1026,7 +1026,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return 0; out_release_instance: - ida_simple_remove(&vmd_instance_ida, vmd->instance); + ida_free(&vmd_instance_ida, vmd->instance); return err; } @@ -1048,7 +1048,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_cleanup_srcu(vmd); vmd_detach_resources(vmd); vmd_remove_irq_domain(vmd); - ida_simple_remove(&vmd_instance_ida, vmd->instance); + ida_free(&vmd_instance_ida, vmd->instance); } static void vmd_shutdown(struct pci_dev *dev) diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c new file mode 100644 index 000000000000..2c562b9eaf80 --- /dev/null +++ b/drivers/pci/devres.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/device.h> +#include <linux/pci.h> +#include "pci.h" + +/* + * PCI iomap devres + */ +#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS + +struct pcim_iomap_devres { + void __iomem *table[PCIM_IOMAP_MAX]; +}; + + +static void devm_pci_unmap_iospace(struct device *dev, void *ptr) +{ + struct resource **res = ptr; + + pci_unmap_iospace(*res); +} + +/** + * devm_pci_remap_iospace - Managed pci_remap_iospace() + * @dev: Generic device to remap IO address for + * @res: Resource describing the I/O space + * @phys_addr: physical address of range to be mapped + * + * Managed pci_remap_iospace(). Map is automatically unmapped on driver + * detach. 
+ */ +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr) +{ + const struct resource **ptr; + int error; + + ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + error = pci_remap_iospace(res, phys_addr); + if (error) { + devres_free(ptr); + } else { + *ptr = res; + devres_add(dev, ptr); + } + + return error; +} +EXPORT_SYMBOL(devm_pci_remap_iospace); + +/** + * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver + * detach. + */ +void __iomem *devm_pci_remap_cfgspace(struct device *dev, + resource_size_t offset, + resource_size_t size) +{ + void __iomem **ptr, *addr; + + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return NULL; + + addr = pci_remap_cfgspace(offset, size); + if (addr) { + *ptr = addr; + devres_add(dev, ptr); + } else + devres_free(ptr); + + return addr; +} +EXPORT_SYMBOL(devm_pci_remap_cfgspace); + +/** + * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource + * @dev: generic device to handle the resource for + * @res: configuration space resource to be handled + * + * Checks that a resource is a valid memory region, requests the memory + * region and ioremaps with pci_remap_cfgspace() API that ensures the + * proper PCI configuration space memory attributes are guaranteed. + * + * All operations are managed and will be undone on driver detach. + * + * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. Usage example:: + * + * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + * base = devm_pci_remap_cfg_resource(&pdev->dev, res); + * if (IS_ERR(base)) + * return PTR_ERR(base); + */ +void __iomem *devm_pci_remap_cfg_resource(struct device *dev, + struct resource *res) +{ + resource_size_t size; + const char *name; + void __iomem *dest_ptr; + + BUG_ON(!dev); + + if (!res || resource_type(res) != IORESOURCE_MEM) { + dev_err(dev, "invalid resource\n"); + return IOMEM_ERR_PTR(-EINVAL); + } + + size = resource_size(res); + + if (res->name) + name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev), + res->name); + else + name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!name) + return IOMEM_ERR_PTR(-ENOMEM); + + if (!devm_request_mem_region(dev, res->start, size, name)) { + dev_err(dev, "can't request region for resource %pR\n", res); + return IOMEM_ERR_PTR(-EBUSY); + } + + dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); + if (!dest_ptr) { + dev_err(dev, "ioremap failed for resource %pR\n", res); + devm_release_mem_region(dev, res->start, size); + dest_ptr = IOMEM_ERR_PTR(-ENOMEM); + } + + return dest_ptr; +} +EXPORT_SYMBOL(devm_pci_remap_cfg_resource); + +/** + * pcim_set_mwi - a device-managed pci_set_mwi() + * @dev: the PCI device for which MWI is enabled + * + * Managed pci_set_mwi(). + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
+ */ +int pcim_set_mwi(struct pci_dev *dev) +{ + struct pci_devres *dr; + + dr = find_pci_dr(dev); + if (!dr) + return -ENOMEM; + + dr->mwi = 1; + return pci_set_mwi(dev); +} +EXPORT_SYMBOL(pcim_set_mwi); + + +static void pcim_release(struct device *gendev, void *res) +{ + struct pci_dev *dev = to_pci_dev(gendev); + struct pci_devres *this = res; + int i; + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + if (this->region_mask & (1 << i)) + pci_release_region(dev, i); + + if (this->mwi) + pci_clear_mwi(dev); + + if (this->restore_intx) + pci_intx(dev, this->orig_intx); + + if (this->enabled && !this->pinned) + pci_disable_device(dev); +} + +/* + * TODO: After the last four callers in pci.c are ported, find_pci_dr() + * needs to be made static again. + */ +struct pci_devres *find_pci_dr(struct pci_dev *pdev) +{ + if (pci_is_managed(pdev)) + return devres_find(&pdev->dev, pcim_release, NULL, NULL); + return NULL; +} + +static struct pci_devres *get_pci_dr(struct pci_dev *pdev) +{ + struct pci_devres *dr, *new_dr; + + dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); + if (dr) + return dr; + + new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); + if (!new_dr) + return NULL; + return devres_get(&pdev->dev, new_dr, NULL, NULL); +} + +/** + * pcim_enable_device - Managed pci_enable_device() + * @pdev: PCI device to be initialized + * + * Managed pci_enable_device(). + */ +int pcim_enable_device(struct pci_dev *pdev) +{ + struct pci_devres *dr; + int rc; + + dr = get_pci_dr(pdev); + if (unlikely(!dr)) + return -ENOMEM; + if (dr->enabled) + return 0; + + rc = pci_enable_device(pdev); + if (!rc) { + pdev->is_managed = 1; + dr->enabled = 1; + } + return rc; +} +EXPORT_SYMBOL(pcim_enable_device); + +/** + * pcim_pin_device - Pin managed PCI device + * @pdev: PCI device to pin + * + * Pin managed PCI device @pdev. Pinned device won't be disabled on + * driver detach. @pdev must have been enabled with + * pcim_enable_device(). + */ +void pcim_pin_device(struct pci_dev *pdev) +{ + struct pci_devres *dr; + + dr = find_pci_dr(pdev); + WARN_ON(!dr || !dr->enabled); + if (dr) + dr->pinned = 1; +} +EXPORT_SYMBOL(pcim_pin_device); + +static void pcim_iomap_release(struct device *gendev, void *res) +{ + struct pci_dev *dev = to_pci_dev(gendev); + struct pcim_iomap_devres *this = res; + int i; + + for (i = 0; i < PCIM_IOMAP_MAX; i++) + if (this->table[i]) + pci_iounmap(dev, this->table[i]); +} + +/** + * pcim_iomap_table - access iomap allocation table + * @pdev: PCI device to access iomap table for + * + * Access iomap allocation table for @dev. If iomap table doesn't + * exist and @pdev is managed, it will be allocated. All iomaps + * recorded in the iomap table are automatically unmapped on driver + * detach. + * + * This function might sleep when the table is first allocated but can + * be safely called without context and guaranteed to succeed once + * allocated. + */ +void __iomem * const *pcim_iomap_table(struct pci_dev *pdev) +{ + struct pcim_iomap_devres *dr, *new_dr; + + dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); + if (dr) + return dr->table; + + new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!new_dr) + return NULL; + dr = devres_get(&pdev->dev, new_dr, NULL, NULL); + return dr->table; +} +EXPORT_SYMBOL(pcim_iomap_table); + +/** + * pcim_iomap - Managed pcim_iomap() + * @pdev: PCI device to iomap for + * @bar: BAR to iomap + * @maxlen: Maximum length of iomap + * + * Managed pci_iomap(). 
Map is automatically unmapped on driver + * detach. + */ +void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) +{ + void __iomem **tbl; + + BUG_ON(bar >= PCIM_IOMAP_MAX); + + tbl = (void __iomem **)pcim_iomap_table(pdev); + if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ + return NULL; + + tbl[bar] = pci_iomap(pdev, bar, maxlen); + return tbl[bar]; +} +EXPORT_SYMBOL(pcim_iomap); + +/** + * pcim_iounmap - Managed pci_iounmap() + * @pdev: PCI device to iounmap for + * @addr: Address to unmap + * + * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). + */ +void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) +{ + void __iomem **tbl; + int i; + + pci_iounmap(pdev, addr); + + tbl = (void __iomem **)pcim_iomap_table(pdev); + BUG_ON(!tbl); + + for (i = 0; i < PCIM_IOMAP_MAX; i++) + if (tbl[i] == addr) { + tbl[i] = NULL; + return; + } + WARN_ON(1); +} +EXPORT_SYMBOL(pcim_iounmap); + +/** + * pcim_iomap_regions - Request and iomap PCI BARs + * @pdev: PCI device to map IO resources for + * @mask: Mask of BARs to request and iomap + * @name: Name used when requesting regions + * + * Request and iomap regions specified by @mask. + */ +int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) +{ + void __iomem * const *iomap; + int i, rc; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + unsigned long len; + + if (!(mask & (1 << i))) + continue; + + rc = -EINVAL; + len = pci_resource_len(pdev, i); + if (!len) + goto err_inval; + + rc = pci_request_region(pdev, i, name); + if (rc) + goto err_inval; + + rc = -ENOMEM; + if (!pcim_iomap(pdev, i, 0)) + goto err_region; + } + + return 0; + + err_region: + pci_release_region(pdev, i); + err_inval: + while (--i >= 0) { + if (!(mask & (1 << i))) + continue; + pcim_iounmap(pdev, iomap[i]); + pci_release_region(pdev, i); + } + + return rc; +} +EXPORT_SYMBOL(pcim_iomap_regions); + +/** + * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones + * @pdev: PCI device to map IO resources for + * @mask: Mask of BARs to iomap + * @name: Name used when requesting regions + * + * Request all PCI BARs and iomap regions specified by @mask. + */ +int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, + const char *name) +{ + int request_mask = ((1 << 6) - 1) & ~mask; + int rc; + + rc = pci_request_selected_regions(pdev, request_mask, name); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, mask, name); + if (rc) + pci_release_selected_regions(pdev, request_mask); + return rc; +} +EXPORT_SYMBOL(pcim_iomap_regions_request_all); + +/** + * pcim_iounmap_regions - Unmap and release PCI BARs + * @pdev: PCI device to map IO resources for + * @mask: Mask of BARs to unmap and release + * + * Unmap and release regions specified by @mask. 
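Taken together, the helpers in this new devres.c support the usual fully-managed probe pattern. A minimal sketch, with a hypothetical driver name "foo" and its register BAR assumed to be BAR 0:

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);	/* enable + mark pdev as managed */
	if (rc)
		return rc;

	rc = pcim_set_mwi(pdev);	/* MWI is advisory; failure is non-fatal */
	if (rc)
		dev_warn(&pdev->dev, "MWI not enabled\n");

	rc = pcim_iomap_regions(pdev, BIT(0), "foo");	/* request + iomap BAR 0 */
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[0];

	/*
	 * ... device setup via 'regs'. No explicit cleanup is needed: the
	 * mapping, the region, MWI and the enable are all undone by the
	 * devres callbacks above when the driver detaches.
	 */
	return 0;
}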
+ */ +void pcim_iounmap_regions(struct pci_dev *pdev, int mask) +{ + void __iomem * const *iomap; + int i; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return; + + for (i = 0; i < PCIM_IOMAP_MAX; i++) { + if (!(mask & (1 << i))) + continue; + + pcim_iounmap(pdev, iomap[i]); + pci_release_region(pdev, i); + } +} +EXPORT_SYMBOL(pcim_iounmap_regions); diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index b7b9d3e21f97..2c54d80107cf 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -21,6 +21,15 @@ /* Platform specific flags */ #define MHI_EPF_USE_DMA BIT(0) +struct pci_epf_mhi_dma_transfer { + struct pci_epf_mhi *epf_mhi; + struct mhi_ep_buf_info buf_info; + struct list_head node; + dma_addr_t paddr; + enum dma_data_direction dir; + size_t size; +}; + struct pci_epf_mhi_ep_info { const struct mhi_ep_cntrl_config *config; struct pci_epf_header *epf_header; @@ -114,6 +123,22 @@ static const struct pci_epf_mhi_ep_info sm8450_info = { .flags = MHI_EPF_USE_DMA, }; +static struct pci_epf_header sa8775p_header = { + .vendorid = PCI_VENDOR_ID_QCOM, + .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */ + .baseclass_code = PCI_CLASS_OTHERS, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +static const struct pci_epf_mhi_ep_info sa8775p_info = { + .config = &mhi_v1_config, + .epf_header = &sa8775p_header, + .bar_num = BAR_0, + .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32, + .msi_count = 32, + .mru = 0x8000, +}; + struct pci_epf_mhi { const struct pci_epc_features *epc_features; const struct pci_epf_mhi_ep_info *info; @@ -124,6 +149,10 @@ struct pci_epf_mhi { resource_size_t mmio_phys; struct dma_chan *dma_chan_tx; struct dma_chan *dma_chan_rx; + struct workqueue_struct *dma_wq; + struct work_struct dma_work; + struct list_head dma_list; + spinlock_t list_lock; u32 mmio_size; int irq; }; @@ -205,63 +234,69 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector) * MHI supplies 0 based MSI vectors but the API expects the vector * number to start from 1, so we need to increment the vector by 1. 
*/ - pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI, vector + 1); } -static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, - void *to, size_t size) +static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); - size_t offset = get_align_offset(epf_mhi, from); + size_t offset = get_align_offset(epf_mhi, buf_info->host_addr); void __iomem *tre_buf; phys_addr_t tre_phys; int ret; mutex_lock(&epf_mhi->lock); - ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf, - offset, size); + ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys, + &tre_buf, offset, buf_info->size); if (ret) { mutex_unlock(&epf_mhi->lock); return ret; } - memcpy_fromio(to, tre_buf, size); + memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size); - __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset, - size); + __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys, + tre_buf, offset, buf_info->size); mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + buf_info->cb(buf_info); + return 0; } static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl, - void *from, u64 to, size_t size) + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); - size_t offset = get_align_offset(epf_mhi, to); + size_t offset = get_align_offset(epf_mhi, buf_info->host_addr); void __iomem *tre_buf; phys_addr_t tre_phys; int ret; mutex_lock(&epf_mhi->lock); - ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf, - offset, size); + ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys, + &tre_buf, offset, buf_info->size); if (ret) { mutex_unlock(&epf_mhi->lock); return ret; } - memcpy_toio(tre_buf, from, size); + memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size); - __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset, - size); + __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys, + tre_buf, offset, buf_info->size); mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + buf_info->cb(buf_info); + return 0; } @@ -270,8 +305,8 @@ static void pci_epf_mhi_dma_callback(void *param) complete(param); } -static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, - void *to, size_t size) +static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); struct device *dma_dev = epf_mhi->epf->epc->dev.parent; @@ -284,13 +319,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, dma_addr_t dst_addr; int ret; - if (size < SZ_4K) - return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size); + if (buf_info->size < SZ_4K) + return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info); mutex_lock(&epf_mhi->lock); config.direction = DMA_DEV_TO_MEM; - config.src_addr = from; + config.src_addr = buf_info->host_addr; ret = dmaengine_slave_config(chan, &config); if (ret) { @@ -298,14 +333,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, goto err_unlock; } - dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE); + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_FROM_DEVICE); ret = dma_mapping_error(dma_dev, dst_addr); if (ret) { dev_err(dev, "Failed to map remote memory\n"); goto err_unlock; } - desc = 
dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM, + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size, + DMA_DEV_TO_MEM, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(dev, "Failed to prepare DMA\n"); @@ -332,15 +369,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, } err_unmap: - dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE); + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE); err_unlock: mutex_unlock(&epf_mhi->lock); return ret; } -static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, - u64 to, size_t size) +static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); struct device *dma_dev = epf_mhi->epf->epc->dev.parent; @@ -353,13 +390,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, dma_addr_t src_addr; int ret; - if (size < SZ_4K) - return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size); + if (buf_info->size < SZ_4K) + return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info); mutex_lock(&epf_mhi->lock); config.direction = DMA_MEM_TO_DEV; - config.dst_addr = to; + config.dst_addr = buf_info->host_addr; ret = dmaengine_slave_config(chan, &config); if (ret) { @@ -367,14 +404,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, goto err_unlock; } - src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE); + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_TO_DEVICE); ret = dma_mapping_error(dma_dev, src_addr); if (ret) { dev_err(dev, "Failed to map remote memory\n"); goto err_unlock; } - desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV, + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size, + DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(dev, "Failed to prepare DMA\n"); @@ -401,7 +440,199 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, } err_unmap: - dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE); + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE); +err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + +static void pci_epf_mhi_dma_worker(struct work_struct *work) +{ + struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *itr, *tmp; + struct mhi_ep_buf_info *buf_info; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&epf_mhi->list_lock, flags); + list_splice_tail_init(&epf_mhi->dma_list, &head); + spin_unlock_irqrestore(&epf_mhi->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir); + buf_info = &itr->buf_info; + buf_info->cb(buf_info); + kfree(itr); + } +} + +static void pci_epf_mhi_dma_async_callback(void *param) +{ + struct pci_epf_mhi_dma_transfer *transfer = param; + struct pci_epf_mhi *epf_mhi = transfer->epf_mhi; + + spin_lock(&epf_mhi->list_lock); + list_add_tail(&transfer->node, &epf_mhi->dma_list); + spin_unlock(&epf_mhi->list_lock); + + queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work); +} + +static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = 
epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_rx; + struct device *dev = &epf_mhi->epf->dev; + DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t dst_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_DEV_TO_MEM; + config.src_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_FROM_DEVICE); + ret = dma_mapping_error(dma_dev, dst_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size, + DMA_DEV_TO_MEM, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = dst_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_FROM_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE); +err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + +static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_tx; + struct device *dev = &epf_mhi->epf->dev; + DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t src_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_MEM_TO_DEV; + config.dst_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_TO_DEVICE); + ret = dma_mapping_error(dma_dev, src_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size, + DMA_MEM_TO_DEV, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = src_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_TO_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if 
(ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE); err_unlock: mutex_unlock(&epf_mhi->lock); @@ -431,6 +662,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) struct device *dev = &epf_mhi->epf->dev; struct epf_dma_filter filter; dma_cap_mask_t mask; + int ret; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -449,16 +681,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) &filter); if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) { dev_err(dev, "Failed to request rx channel\n"); - dma_release_channel(epf_mhi->dma_chan_tx); - epf_mhi->dma_chan_tx = NULL; - return -ENODEV; + ret = -ENODEV; + goto err_release_tx; } + epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0); + if (!epf_mhi->dma_wq) { + ret = -ENOMEM; + goto err_release_rx; + } + + INIT_LIST_HEAD(&epf_mhi->dma_list); + INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker); + spin_lock_init(&epf_mhi->list_lock); + return 0; + +err_release_rx: + dma_release_channel(epf_mhi->dma_chan_rx); + epf_mhi->dma_chan_rx = NULL; +err_release_tx: + dma_release_channel(epf_mhi->dma_chan_tx); + epf_mhi->dma_chan_tx = NULL; + + return ret; } static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi) { + destroy_workqueue(epf_mhi->dma_wq); dma_release_channel(epf_mhi->dma_chan_tx); dma_release_channel(epf_mhi->dma_chan_rx); epf_mhi->dma_chan_tx = NULL; @@ -531,12 +782,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf) mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq; mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map; mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free; + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read; + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write; if (info->flags & MHI_EPF_USE_DMA) { - mhi_cntrl->read_from_host = pci_epf_mhi_edma_read; - mhi_cntrl->write_to_host = pci_epf_mhi_edma_write; - } else { - mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read; - mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write; + mhi_cntrl->read_sync = pci_epf_mhi_edma_read; + mhi_cntrl->write_sync = pci_epf_mhi_edma_write; + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async; + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async; } /* Register the MHI EP controller */ @@ -644,7 +896,7 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf) pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar); } -static struct pci_epc_event_ops pci_epf_mhi_event_ops = { +static const struct pci_epc_event_ops pci_epf_mhi_event_ops = { .core_init = pci_epf_mhi_core_init, .link_up = pci_epf_mhi_link_up, .link_down = pci_epf_mhi_link_down, @@ -677,12 +929,13 @@ static int pci_epf_mhi_probe(struct pci_epf *epf, } static const struct pci_epf_device_id pci_epf_mhi_ids[] = { - { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info }, - { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info }, + { .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info }, + { .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info }, + { .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info }, {}, }; -static struct pci_epf_ops pci_epf_mhi_ops = { +static const struct pci_epf_ops pci_epf_mhi_ops = { .unbind = pci_epf_mhi_unbind, .bind = pci_epf_mhi_bind, }; diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c 
b/drivers/pci/endpoint/functions/pci-epf-ntb.c index 9aac2c6f3bb9..e01a98e74d21 100644 --- a/drivers/pci/endpoint/functions/pci-epf-ntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c @@ -140,9 +140,9 @@ static struct pci_epf_header epf_ntb_header = { static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) { enum pci_epc_interface_type type; - enum pci_epc_irq_type irq_type; struct epf_ntb_epc *ntb_epc; struct epf_ntb_ctrl *ctrl; + unsigned int irq_type; struct pci_epc *epc; u8 func_no, vfunc_no; bool is_msix; @@ -159,7 +159,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) ctrl->link_status |= LINK_STATUS_UP; else ctrl->link_status &= ~LINK_STATUS_UP; - irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI; + irq_type = is_msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1); if (ret) { dev_err(&epc->dev, @@ -1012,13 +1012,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb, epc_features = ntb_epc->epc_features; barno = ntb_epc->epf_ntb_bar[BAR_CONFIG]; - size = epc_features->bar_fixed_size[barno]; + size = epc_features->bar[barno].fixed_size; align = epc_features->align; peer_ntb_epc = ntb->epc[!type]; peer_epc_features = peer_ntb_epc->epc_features; peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; - peer_size = peer_epc_features->bar_fixed_size[peer_barno]; + peer_size = peer_epc_features->bar[peer_barno].fixed_size; /* Check if epc_features is populated incorrectly */ if ((!IS_ALIGNED(size, align))) @@ -1067,7 +1067,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb, else if (size < ctrl_size + spad_size) return -EINVAL; - base = pci_epf_alloc_space(epf, size, barno, align, type); + base = pci_epf_alloc_space(epf, size, barno, epc_features, type); if (!base) { dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n", pci_epc_interface_string(type)); @@ -2099,7 +2099,7 @@ static int epf_ntb_probe(struct pci_epf *epf, return 0; } -static struct pci_epf_ops epf_ntb_ops = { +static const struct pci_epf_ops epf_ntb_ops = { .bind = epf_ntb_bind, .unbind = epf_ntb_unbind, .add_cfs = epf_ntb_add_cfs, diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 1f0d2b84296a..cd4ffb39dcdc 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -19,11 +19,11 @@ #include <linux/pci-epf.h> #include <linux/pci_regs.h> -#define IRQ_TYPE_LEGACY 0 +#define IRQ_TYPE_INTX 0 #define IRQ_TYPE_MSI 1 #define IRQ_TYPE_MSIX 2 -#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_INTX_IRQ BIT(0) #define COMMAND_RAISE_MSI_IRQ BIT(1) #define COMMAND_RAISE_MSIX_IRQ BIT(2) #define COMMAND_READ BIT(3) @@ -600,9 +600,9 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, WRITE_ONCE(reg->status, status); switch (reg->irq_type) { - case IRQ_TYPE_LEGACY: + case IRQ_TYPE_INTX: pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_LEGACY, 0); + PCI_IRQ_INTX, 0); break; case IRQ_TYPE_MSI: count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); @@ -612,7 +612,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, return; } pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_MSI, reg->irq_number); + PCI_IRQ_MSI, reg->irq_number); break; case IRQ_TYPE_MSIX: count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); @@ -622,7 +622,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, return; } pci_epc_raise_irq(epc, 
epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_MSIX, reg->irq_number); + PCI_IRQ_MSIX, reg->irq_number); break; default: dev_err(dev, "Failed to raise IRQ, unknown type\n"); @@ -659,7 +659,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) } switch (command) { - case COMMAND_RAISE_LEGACY_IRQ: + case COMMAND_RAISE_INTX_IRQ: case COMMAND_RAISE_MSI_IRQ: case COMMAND_RAISE_MSIX_IRQ: pci_epf_test_raise_irq(epf_test, reg); @@ -729,7 +729,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) */ add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; - if (!!(epc_features->reserved_bar & (1 << bar))) + if (epc_features->bar[bar].type == BAR_RESERVED) continue; ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, @@ -841,14 +841,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) } test_reg_size = test_reg_bar_size + msix_table_size + pba_size; - if (epc_features->bar_fixed_size[test_reg_bar]) { - if (test_reg_size > bar_size[test_reg_bar]) - return -ENOMEM; - test_reg_size = bar_size[test_reg_bar]; - } - base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, - epc_features->align, PRIMARY_INTERFACE); + epc_features, PRIMARY_INTERFACE); if (!base) { dev_err(dev, "Failed to allocate register space\n"); return -ENOMEM; } @@ -862,12 +856,11 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) if (bar == test_reg_bar) continue; - if (!!(epc_features->reserved_bar & (1 << bar))) + if (epc_features->bar[bar].type == BAR_RESERVED) continue; base = pci_epf_alloc_space(epf, bar_size[bar], bar, - epc_features->align, - PRIMARY_INTERFACE); + epc_features, PRIMARY_INTERFACE); if (!base) dev_err(dev, "Failed to allocate space for BAR%d\n", bar); @@ -881,16 +874,12 @@ static void pci_epf_configure_bar(struct pci_epf *epf, const struct pci_epc_features *epc_features) { struct pci_epf_bar *epf_bar; - bool bar_fixed_64bit; int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { epf_bar = &epf->bar[i]; - bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); - if (bar_fixed_64bit) + if (epc_features->bar[i].only_64bit) epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; - if (epc_features->bar_fixed_size[i]) - bar_size[i] = epc_features->bar_fixed_size[i]; } } @@ -973,7 +962,7 @@ static int pci_epf_test_probe(struct pci_epf *epf, return 0; } -static struct pci_epf_ops ops = { +static const struct pci_epf_ops ops = { .unbind = pci_epf_test_unbind, .bind = pci_epf_test_bind, }; diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 3f60128560ed..8e779eecd62d 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -422,7 +422,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) epf->func_no, epf->vfunc_no); barno = ntb->epf_ntb_bar[BAR_CONFIG]; - size = epc_features->bar_fixed_size[barno]; + size = epc_features->bar[barno].fixed_size; align = epc_features->align; if ((!IS_ALIGNED(size, align))) @@ -446,7 +446,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) else if (size < ctrl_size + spad_size) return -EINVAL; - base = pci_epf_alloc_space(epf, size, barno, align, 0); + base = pci_epf_alloc_space(epf, size, barno, epc_features, 0); if (!base) { dev_err(dev, "Config/Status/SPAD alloc region fail\n"); return -ENOMEM; @@ -527,7 +527,6 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) static int epf_ntb_db_bar_init(struct epf_ntb *ntb) { const struct pci_epc_features *epc_features; - u32 align; struct device *dev = 
&ntb->epf->dev; int ret; struct pci_epf_bar *epf_bar; @@ -538,19 +537,9 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); - align = epc_features->align; - - if (size < 128) - size = 128; - - if (align) - size = ALIGN(size, align); - else - size = roundup_pow_of_two(size); - barno = ntb->epf_ntb_bar[BAR_DB]; - mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0); + mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0); if (!mw_addr) { dev_err(dev, "Failed to allocate OB address\n"); return -ENOMEM; @@ -1172,11 +1161,8 @@ static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits) func_no = ntb->epf->func_no; vfunc_no = ntb->epf->vfunc_no; - ret = pci_epc_raise_irq(ntb->epf->epc, - func_no, - vfunc_no, - PCI_EPC_IRQ_MSI, - interrupt_num + 1); + ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no, + PCI_IRQ_MSI, interrupt_num + 1); if (ret) dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n"); @@ -1272,21 +1258,17 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) { dev_err(dev, "Cannot set DMA mask\n"); - return -EINVAL; + return ret; } ret = ntb_register_device(&ndev->ntb); if (ret) { dev_err(dev, "Failed to register NTB device\n"); - goto err_register_dev; + return ret; } dev_dbg(dev, "PCI Virtual NTB driver loaded\n"); return 0; - -err_register_dev: - put_device(&ndev->ntb.dev); - return -EINVAL; } static struct pci_device_id pci_vntb_table[] = { @@ -1387,7 +1369,7 @@ static void epf_ntb_unbind(struct pci_epf *epf) } // EPF driver probe -static struct pci_epf_ops epf_ntb_ops = { +static const struct pci_epf_ops epf_ntb_ops = { .bind = epf_ntb_bind, .unbind = epf_ntb_unbind, .add_cfs = epf_ntb_add_cfs, diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 56e1184bc6c2..da3fc0795b0b 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pci_epc_get); * @epc_features: pci_epc_features structure that holds the reserved bar bitmap * * Invoke to get the first unreserved BAR that can be used by the endpoint - * function. For any incorrect value in reserved_bar return '0'. + * function. */ enum pci_barno pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features) @@ -102,32 +102,27 @@ EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar); * @bar: the starting BAR number from where unreserved BAR should be searched * * Invoke to get the next unreserved BAR starting from @bar that can be used - * for endpoint function. For any incorrect value in reserved_bar return '0'. + * for endpoint function. 
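As a rough usage sketch (not part of the patch), a caller could walk the unreserved BARs with these helpers; epc_features is assumed to come from pci_epc_get_features():

        enum pci_barno bar;

        bar = pci_epc_get_first_free_bar(epc_features);
        while (bar != NO_BAR) {
                /* 'bar' is neither reserved nor the upper half of a 64-bit BAR */
                pr_debug("BAR %d is usable\n", bar);
                bar = pci_epc_get_next_free_bar(epc_features, bar + 1);
        }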
*/ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features *epc_features, enum pci_barno bar) { - unsigned long free_bar; + int i; if (!epc_features) return BAR_0; /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */ - if ((epc_features->bar_fixed_64bit << 1) & 1 << bar) + if (bar > 0 && epc_features->bar[bar - 1].only_64bit) bar++; - /* Find if the reserved BAR is also a 64-bit BAR */ - free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit; - - /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */ - free_bar <<= 1; - free_bar |= epc_features->reserved_bar; - - free_bar = find_next_zero_bit(&free_bar, 6, bar); - if (free_bar > 5) - return NO_BAR; + for (i = bar; i < PCI_STD_NUM_BARS; i++) { + /* If the BAR is not reserved, return it. */ + if (epc_features->bar[i].type != BAR_RESERVED) + return i; + } - return free_bar; + return NO_BAR; } EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar); @@ -211,13 +206,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start); * @epc: the EPC device which has to interrupt the host * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function - * @type: specify the type of interrupt; legacy, MSI or MSI-X + * @type: specify the type of interrupt; INTX, MSI or MSI-X * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N) * - * Invoke to raise an legacy, MSI or MSI-X interrupt + * Invoke to raise an INTX, MSI or MSI-X interrupt */ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { int ret; diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 2c32de667937..0a28a0b0911b 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -17,7 +17,7 @@ static DEFINE_MUTEX(pci_epf_mutex); -static struct bus_type pci_epf_bus_type; +static const struct bus_type pci_epf_bus_type; static const struct device_type pci_epf_type; /** @@ -251,14 +251,17 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space); * @epf: the EPF device to whom allocate the memory * @size: the size of the memory that has to be allocated * @bar: the BAR number corresponding to the allocated register space - * @align: alignment size for the allocation region + * @epc_features: the features provided by the EPC specific to this EPF * @type: Identifies if the allocation is for primary EPC or secondary EPC * * Invoke to allocate memory for the PCI EPF register space. 
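A minimal sketch of a caller adapted to the reworked signature; the function name and BAR choice are illustrative, not taken from the patch, and SZ_4K assumes <linux/sizes.h>:

        static int my_epf_alloc_bar0(struct pci_epf *epf,
                                     const struct pci_epc_features *epc_features)
        {
                void *base;

                /* The requested size may be grown to the BAR's fixed size or alignment */
                base = pci_epf_alloc_space(epf, SZ_4K, BAR_0, epc_features,
                                           PRIMARY_INTERFACE);
                if (!base)
                        return -ENOMEM;

                return 0;
        }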
*/ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, - size_t align, enum pci_epc_interface_type type) + const struct pci_epc_features *epc_features, + enum pci_epc_interface_type type) { + u64 bar_fixed_size = epc_features->bar[bar].fixed_size; + size_t align = epc_features->align; struct pci_epf_bar *epf_bar; dma_addr_t phys_addr; struct pci_epc *epc; @@ -268,6 +271,15 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, if (size < 128) size = 128; + if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) { + if (size > bar_fixed_size) { + dev_err(&epf->dev, + "requested BAR size is larger than fixed size\n"); + return NULL; + } + size = bar_fixed_size; + } + if (align) size = ALIGN(size, align); else @@ -507,7 +519,7 @@ static void pci_epf_device_remove(struct device *dev) epf->driver = NULL; } -static struct bus_type pci_epf_bus_type = { +static const struct bus_type pci_epf_bus_type = { .name = "pci-epf", .match = pci_epf_device_match, .probe = pci_epf_device_probe, diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 601129772b2d..5b1f271c6034 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -512,15 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - if (pci_is_root_bus(bus)) - __pci_bus_size_bridges(dev->subordinate, &add_list); + __pci_bus_size_bridges(dev->subordinate, + &add_list); } } } - if (pci_is_root_bus(bus)) - __pci_bus_assign_resources(bus, &add_list, NULL); - else - pci_assign_unassigned_bridge_resources(bus->self); + __pci_bus_assign_resources(bus, &add_list, NULL); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index a89b7de72dcf..7333b305f2a5 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) hotplug_slot); int rc; - if (zdev->state != ZPCI_FN_STATE_STANDBY) - return -EIO; + mutex_lock(&zdev->state_lock); + if (zdev->state != ZPCI_FN_STATE_STANDBY) { + rc = -EIO; + goto out; + } rc = sclp_pci_configure(zdev->fid); zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc); if (rc) - return rc; + goto out; zdev->state = ZPCI_FN_STATE_CONFIGURED; - return zpci_scan_configured_device(zdev, zdev->fh); + rc = zpci_scan_configured_device(zdev, zdev->fh); +out: + mutex_unlock(&zdev->state_lock); + return rc; } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); - struct pci_dev *pdev; + struct pci_dev *pdev = NULL; + int rc; - if (zdev->state != ZPCI_FN_STATE_CONFIGURED) - return -EIO; + mutex_lock(&zdev->state_lock); + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) { + rc = -EIO; + goto out; + } pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { pci_dev_put(pdev); - return -EBUSY; + rc = -EBUSY; + goto out; } - pci_dev_put(pdev); - return zpci_deconfigure_device(zdev); + rc = zpci_deconfigure_device(zdev); +out: + mutex_unlock(&zdev->state_lock); + if (pdev) + pci_dev_put(pdev); + return rc; } static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); + int rc = -EIO; - if (zdev->state != 
ZPCI_FN_STATE_CONFIGURED) - return -EIO; /* - * We can't take the zdev->lock as reset_slot may be called during - * probing and/or device removal which already happens under the - * zdev->lock. Instead the user should use the higher level - * pci_reset_function() or pci_bus_reset() which hold the PCI device - * lock preventing concurrent removal. If not using these functions - * holding the PCI device lock is required. + * If we can't get the zdev->state_lock the device state is + * currently undergoing a transition and we bail out - just + * the same as if the device's state is not configured at all. */ + if (!mutex_trylock(&zdev->state_lock)) + return rc; - /* As long as the function is configured we can reset */ - if (probe) - return 0; + /* We can reset only if the function is configured */ + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) + goto out; + + if (probe) { + rc = 0; + goto out; + } - return zpci_hot_reset_device(zdev); + rc = zpci_hot_reset_device(zdev); +out: + mutex_unlock(&zdev->state_lock); + return rc; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c new file mode 100644 index 000000000000..c9725428e387 --- /dev/null +++ b/drivers/pci/iomap.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implement the default iomap interfaces + * + * (C) Copyright 2004 Linus Torvalds + */ +#include <linux/pci.h> +#include <linux/io.h> + +#include <linux/export.h> + +/** + * pci_iomap_range - create a virtual mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @offset: map memory at the given offset in BAR + * @maxlen: max length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR from offset to the end, pass %0 here. + * */ +void __iomem *pci_iomap_range(struct pci_dev *dev, + int bar, + unsigned long offset, + unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + if (len <= offset || !start) + return NULL; + len -= offset; + start += offset; + if (maxlen && len > maxlen) + len = maxlen; + if (flags & IORESOURCE_IO) + return __pci_ioport_map(dev, start, len); + if (flags & IORESOURCE_MEM) + return ioremap(start, len); + /* What? */ + return NULL; +} +EXPORT_SYMBOL(pci_iomap_range); + +/** + * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @offset: map memory at the given offset in BAR + * @maxlen: max length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR from offset to the end, pass %0 here. 
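A hedged example of the typical call pattern in a driver probe path (register offset and error handling are illustrative):

        void __iomem *regs;

        regs = pci_iomap(pdev, 0, 0);   /* maxlen == 0: map all of BAR 0 */
        if (!regs)
                return -ENOMEM;

        writel(0, regs + 0x10);         /* offset 0x10 is illustrative */

        pci_iounmap(pdev, regs);        /* handles both MMIO and PIO BARs */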
+ * */ +void __iomem *pci_iomap_wc_range(struct pci_dev *dev, + int bar, + unsigned long offset, + unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + + if (flags & IORESOURCE_IO) + return NULL; + + if (len <= offset || !start) + return NULL; + + len -= offset; + start += offset; + if (maxlen && len > maxlen) + len = maxlen; + + if (flags & IORESOURCE_MEM) + return ioremap_wc(start, len); + + /* What? */ + return NULL; +} +EXPORT_SYMBOL_GPL(pci_iomap_wc_range); + +/** + * pci_iomap - create a virtual mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @maxlen: length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR without checking for its length first, pass %0 here. + * */ +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL(pci_iomap); + +/** + * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @maxlen: length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR without checking for its length first, pass %0 here. + * */ +void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_wc_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL_GPL(pci_iomap_wc); + +/* + * pci_iounmap() somewhat illogically comes from lib/iomap.c for the + * CONFIG_GENERIC_IOMAP case, because that's the code that knows about + * the different IOMAP ranges. + * + * But if the architecture does not use the generic iomap code, and if + * it has _not_ defined its own private pci_iounmap function, we define + * it here. + * + * NOTE! This default implementation assumes that if the architecture + * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will + * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [, + * and does not need unmapping with 'ioport_unmap()'. + * + * If you have different rules for your architecture, you need to + * implement your own pci_iounmap() that knows the rules for where + * and how IO vs MEM get mapped. + * + * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes + * from legacy <asm-generic/io.h> header file behavior. In particular, + * it would seem to make sense to do the iounmap(p) for the non-IO-space + * case here regardless, but that's not what the old header file code + * did. Probably incorrectly, but this is meant to be bug-for-bug + * compatible. 
+ */ +#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) + +void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +#ifdef ARCH_HAS_GENERIC_IOPORT_MAP + uintptr_t start = (uintptr_t) PCI_IOBASE; + uintptr_t addr = (uintptr_t) p; + + if (addr >= start && addr < start + IO_SPACE_LIMIT) + return; +#endif + iounmap(p); +} +EXPORT_SYMBOL(pci_iounmap); + +#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 25dbe85c4217..aaa33e8dc4c9 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -745,6 +745,7 @@ static int sriov_init(struct pci_dev *dev, int pos) u16 ctrl, total; struct pci_sriov *iov; struct resource *res; + const char *res_name; struct pci_dev *pdev; pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); @@ -785,6 +786,8 @@ found: nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; + res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES); + /* * If it is already FIXED, don't change it, something * (perhaps EA or header fixups) wants it this way. @@ -802,8 +805,8 @@ found: } iov->barsz[i] = resource_size(res); res->end = res->start + resource_size(res) * total - 1; - pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n", - i, res, i, total); + pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n", + res_name, res, i, total); i += bar64; nres++; } diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 0050e8f6814e..4555630be9ec 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -8,9 +8,13 @@ #include <linux/device.h> #include <linux/kernel.h> +#include <linux/errno.h> #include <linux/export.h> +#include <linux/interrupt.h> #include <linux/pci.h> +#include "pci.h" + /** * pci_request_irq - allocate an interrupt line for a PCI device * @dev: PCI device to operate on @@ -74,3 +78,203 @@ void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id) kfree(free_irq(pci_irq_vector(dev, nr), dev_id)); } EXPORT_SYMBOL(pci_free_irq); + +/** + * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge + * @dev: the PCI device + * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) + * + * Perform INTx swizzling for a device behind one level of bridge. This is + * required by section 9.1 of the PCI-to-PCI bridge specification for devices + * behind bridges on add-in cards. For devices with ARI enabled, the slot + * number is always 0 (see the Implementation Note in section 2.2.8.1 of + * the PCI Express Base Specification, Revision 2.1) + */ +u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) +{ + int slot; + + if (pci_ari_enabled(dev->bus)) + slot = 0; + else + slot = PCI_SLOT(dev->devfn); + + return (((pin - 1) + slot) % 4) + 1; +} + +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) +{ + u8 pin; + + pin = dev->pin; + if (!pin) + return -1; + + while (!pci_is_root_bus(dev->bus)) { + pin = pci_swizzle_interrupt_pin(dev, pin); + dev = dev->bus->self; + } + *bridge = dev; + return pin; +} + +/** + * pci_common_swizzle - swizzle INTx all the way to root bridge + * @dev: the PCI device + * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD) + * + * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI + * bridges all the way up to a PCI root bus. 
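For a concrete feel of the swizzle: a device in slot 3 asserting INTB (pin 2) presents as ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA, at the upstream bridge. A caller needing the pin as seen at the root bus would do something like this sketch:

        u8 pin = dev->pin;      /* 1=INTA ... 4=INTD, 0 = no INTx */
        u8 slot;

        if (pin) {
                /* Swizzle through every bridge up to the root bus */
                slot = pci_common_swizzle(dev, &pin);
                /* 'slot' and 'pin' now describe the interrupt at the root */
        }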
+ */ +u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) +{ + u8 pin = *pinp; + + while (!pci_is_root_bus(dev->bus)) { + pin = pci_swizzle_interrupt_pin(dev, pin); + dev = dev->bus->self; + } + *pinp = pin; + return PCI_SLOT(dev->devfn); +} +EXPORT_SYMBOL_GPL(pci_common_swizzle); + +void pci_assign_irq(struct pci_dev *dev) +{ + u8 pin; + u8 slot = -1; + int irq = 0; + struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); + + if (!(hbrg->map_irq)) { + pci_dbg(dev, "runtime IRQ mapping not provided by arch\n"); + return; + } + + /* + * If this device is not on the primary bus, we need to figure out + * which interrupt pin it will come in on. We know which slot it + * will come in on because that slot is where the bridge is. Each + * time the interrupt line passes through a PCI-PCI bridge we must + * apply the swizzle function. + */ + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + /* Cope with illegal. */ + if (pin > 4) + pin = 1; + + if (pin) { + /* Follow the chain of bridges, swizzling as we go. */ + if (hbrg->swizzle_irq) + slot = (*(hbrg->swizzle_irq))(dev, &pin); + + /* + * If a swizzling function is not used, map_irq() must + * ignore slot. + */ + irq = (*(hbrg->map_irq))(dev, slot, pin); + if (irq == -1) + irq = 0; + } + dev->irq = irq; + + pci_dbg(dev, "assign IRQ: got %d\n", dev->irq); + + /* + * Always tell the device, so the driver knows what is the real IRQ + * to use; the device does not use it. + */ + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); +} + +static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) +{ + struct pci_bus *bus = dev->bus; + bool mask_updated = true; + u32 cmd_status_dword; + u16 origcmd, newcmd; + unsigned long flags; + bool irq_pending; + + /* + * We do a single dword read to retrieve both command and status. + * Document assumptions that make this possible. + */ + BUILD_BUG_ON(PCI_COMMAND % 4); + BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); + + raw_spin_lock_irqsave(&pci_lock, flags); + + bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); + + irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; + + /* + * Check interrupt status register to see whether our device + * triggered the interrupt (when masking) or the next IRQ is + * already pending (when unmasking). + */ + if (mask != irq_pending) { + mask_updated = false; + goto done; + } + + origcmd = cmd_status_dword; + newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; + if (mask) + newcmd |= PCI_COMMAND_INTX_DISABLE; + if (newcmd != origcmd) + bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); + +done: + raw_spin_unlock_irqrestore(&pci_lock, flags); + + return mask_updated; +} + +/** + * pci_check_and_mask_intx - mask INTx on pending interrupt + * @dev: the PCI device to operate on + * + * Check if the device dev has its INTx line asserted, mask it and return + * true in that case. False is returned if no interrupt was pending. + */ +bool pci_check_and_mask_intx(struct pci_dev *dev) +{ + return pci_check_and_set_intx_mask(dev, true); +} +EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); + +/** + * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending + * @dev: the PCI device to operate on + * + * Check if the device dev has its INTx line asserted, unmask it if not and + * return true. False is returned and the mask remains active if there was + * still an interrupt pending. 
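These two helpers are typically paired in a shared INTx handler; a sketch, where the driver structure and work item are hypothetical:

        static irqreturn_t my_intx_handler(int irq, void *data)
        {
                struct my_dev *mydev = data;            /* hypothetical */

                /* Mask INTx only if our device actually asserted it */
                if (!pci_check_and_mask_intx(mydev->pdev))
                        return IRQ_NONE;

                /* Defer work; re-enable later via pci_check_and_unmask_intx() */
                schedule_work(&mydev->work);
                return IRQ_HANDLED;
        }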
+ */ +bool pci_check_and_unmask_intx(struct pci_dev *dev) +{ + return pci_check_and_set_intx_mask(dev, false); +} +EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); + +/** + * pcibios_penalize_isa_irq - penalize an ISA IRQ + * @irq: ISA IRQ to penalize + * @active: IRQ active or not + * + * Permits the platform to provide architecture-specific functionality when + * penalizing ISA IRQs. This is the default implementation. Architecture + * implementations can override this. + */ +void __weak pcibios_penalize_isa_irq(int irq, int active) {} + +int __weak pcibios_alloc_irq(struct pci_dev *dev) +{ + return 0; +} + +void __weak pcibios_free_irq(struct pci_dev *dev) +{ +} diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c index 4504039056d1..8da3347a95c4 100644 --- a/drivers/pci/mmap.c +++ b/drivers/pci/mmap.c @@ -11,6 +11,8 @@ #include <linux/mm.h> #include <linux/pci.h> +#include "pci.h" + #ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE static const struct vm_operations_struct pci_phys_vm_ops = { @@ -50,3 +52,30 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar, } #endif + +#if (defined(CONFIG_SYSFS) || defined(CONFIG_PROC_FS)) && \ + (defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) + +int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, + enum pci_mmap_api mmap_api) +{ + resource_size_t pci_start = 0, pci_end; + unsigned long nr, start, size; + + if (pci_resource_len(pdev, resno) == 0) + return 0; + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; + if (mmap_api == PCI_MMAP_PROCFS) { + pci_resource_to_user(pdev, resno, &pdev->resource[resno], + &pci_start, &pci_end); + pci_start >>= PAGE_SHIFT; + } + if (start >= pci_start && start < pci_start + size && + start + nr <= pci_start + size) + return 1; + return 0; +} + +#endif diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index c8be056c248d..cfd84a899c82 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) return (irq_hw_number_t)desc->msi_index | pci_dev_id(dev) << 11 | - (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; + ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27; } static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 0c361561b855..4f47a13cb500 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -661,7 +661,7 @@ done: p2pdma = rcu_dereference(provider->p2pdma); if (p2pdma) xa_store(&p2pdma->map_types, map_types_idx(client), - xa_mk_value(map_type), GFP_KERNEL); + xa_mk_value(map_type), GFP_ATOMIC); rcu_read_unlock(); return map_type; } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 51ec9e7e784f..af2996d0d17f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -419,15 +419,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) return error; } -int __weak pcibios_alloc_irq(struct pci_dev *dev) -{ - return 0; -} - -void __weak pcibios_free_irq(struct pci_dev *dev) -{ -} - #ifdef CONFIG_PCI_IOV static inline bool pci_device_can_probe(struct pci_dev *pdev) { @@ -473,6 +464,13 @@ static void pci_device_remove(struct device *dev) if (drv->remove) { pm_runtime_get_sync(dev); + /* + * If the driver provides a .runtime_idle() callback and it has + * started to run already, it may continue to run in parallel + * with the code below, so wait until 
all of the runtime PM + * activity has completed. + */ + pm_runtime_barrier(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } @@ -1382,10 +1380,7 @@ static int pci_pm_runtime_idle(struct device *dev) if (!pci_dev->driver) return 0; - if (!pm) - return -ENOSYS; - - if (pm->runtime_idle) + if (pm && pm->runtime_idle) return pm->runtime_idle(dev); return 0; @@ -1714,7 +1709,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) return 1; } -struct bus_type pcie_port_bus_type = { +const struct bus_type pcie_port_bus_type = { .name = "pci_express", .match = pcie_port_bus_match, }; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 8e6d1031510b..40cfa716392f 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -831,6 +831,19 @@ static const struct attribute_group pci_dev_config_attr_group = { .is_bin_visible = pci_dev_config_attr_is_visible, }; +/* + * llseek operation for mmappable PCI resources. + * May be left unused if the arch doesn't provide them. + */ +static __maybe_unused loff_t +pci_llseek_resource(struct file *filep, + struct kobject *kobj __always_unused, + struct bin_attribute *attr, + loff_t offset, int whence) +{ + return fixed_size_llseek(filep, offset, whence, attr->size); +} + #ifdef HAVE_PCI_LEGACY /** * pci_read_legacy_io - read byte(s) from legacy I/O port space @@ -963,6 +976,8 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_io->attr.mode = 0600; b->legacy_io->read = pci_read_legacy_io; b->legacy_io->write = pci_write_legacy_io; + /* See pci_create_attr() for motivation */ + b->legacy_io->llseek = pci_llseek_resource; b->legacy_io->mmap = pci_mmap_legacy_io; b->legacy_io->f_mapping = iomem_get_mapping; pci_adjust_legacy_attr(b, pci_mmap_io); @@ -977,6 +992,8 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; + /* See pci_create_attr() for motivation */ + b->legacy_mem->llseek = pci_llseek_resource; b->legacy_mem->f_mapping = iomem_get_mapping; pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); @@ -1005,29 +1022,6 @@ void pci_remove_legacy_files(struct pci_bus *b) #endif /* HAVE_PCI_LEGACY */ #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) - -int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, - enum pci_mmap_api mmap_api) -{ - unsigned long nr, start, size; - resource_size_t pci_start = 0, pci_end; - - if (pci_resource_len(pdev, resno) == 0) - return 0; - nr = vma_pages(vma); - start = vma->vm_pgoff; - size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; - if (mmap_api == PCI_MMAP_PROCFS) { - pci_resource_to_user(pdev, resno, &pdev->resource[resno], - &pci_start, &pci_end); - pci_start >>= PAGE_SHIFT; - } - if (start >= pci_start && start < pci_start + size && - start + nr <= pci_start + size) - return 1; - return 0; -} - /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping @@ -1195,8 +1189,15 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) res_attr->mmap = pci_mmap_resource_uc; } } - if (res_attr->mmap) + if (res_attr->mmap) { res_attr->f_mapping = iomem_get_mapping; + /* + * generic_file_llseek() consults f_mapping->host to determine + * the file size. As iomem_inode knows nothing about the + * attribute, it's not going to work, so override it as well. 
+ */ + res_attr->llseek = pci_llseek_resource; + } res_attr->attr.name = res_attr_name; res_attr->attr.mode = 0600; res_attr->size = pci_resource_len(pdev, num); @@ -1386,79 +1387,89 @@ static const struct attribute_group pci_dev_reset_attr_group = { .is_visible = pci_dev_reset_attr_is_visible, }; +static ssize_t __resource_resize_show(struct device *dev, int n, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + ssize_t ret; + + pci_config_pm_runtime_get(pdev); + + ret = sysfs_emit(buf, "%016llx\n", + (u64)pci_rebar_get_possible_sizes(pdev, n)); + + pci_config_pm_runtime_put(pdev); + + return ret; +} + +static ssize_t __resource_resize_store(struct device *dev, int n, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + unsigned long size, flags; + int ret, i; + u16 cmd; + + if (kstrtoul(buf, 0, &size) < 0) + return -EINVAL; + + device_lock(dev); + if (dev->driver) { + ret = -EBUSY; + goto unlock; + } + + pci_config_pm_runtime_get(pdev); + + if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { + ret = aperture_remove_conflicting_pci_devices(pdev, + "resourceN_resize"); + if (ret) + goto pm_put; + } + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + pci_write_config_word(pdev, PCI_COMMAND, + cmd & ~PCI_COMMAND_MEMORY); + + flags = pci_resource_flags(pdev, n); + + pci_remove_resource_files(pdev); + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) && + pci_resource_flags(pdev, i) == flags) + pci_release_resource(pdev, i); + } + + ret = pci_resize_resource(pdev, n, size); + + pci_assign_unassigned_bus_resources(pdev->bus); + + if (pci_create_resource_files(pdev)) + pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n"); + + pci_write_config_word(pdev, PCI_COMMAND, cmd); +pm_put: + pci_config_pm_runtime_put(pdev); +unlock: + device_unlock(dev); + + return ret ? 
ret : count; +} + #define pci_dev_resource_resize_attr(n) \ static ssize_t resource##n##_resize_show(struct device *dev, \ struct device_attribute *attr, \ - char * buf) \ + char *buf) \ { \ - struct pci_dev *pdev = to_pci_dev(dev); \ - ssize_t ret; \ - \ - pci_config_pm_runtime_get(pdev); \ - \ - ret = sysfs_emit(buf, "%016llx\n", \ - (u64)pci_rebar_get_possible_sizes(pdev, n)); \ - \ - pci_config_pm_runtime_put(pdev); \ - \ - return ret; \ + return __resource_resize_show(dev, n, buf); \ } \ - \ static ssize_t resource##n##_resize_store(struct device *dev, \ struct device_attribute *attr,\ const char *buf, size_t count)\ { \ - struct pci_dev *pdev = to_pci_dev(dev); \ - unsigned long size, flags; \ - int ret, i; \ - u16 cmd; \ - \ - if (kstrtoul(buf, 0, &size) < 0) \ - return -EINVAL; \ - \ - device_lock(dev); \ - if (dev->driver) { \ - ret = -EBUSY; \ - goto unlock; \ - } \ - \ - pci_config_pm_runtime_get(pdev); \ - \ - if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \ - ret = aperture_remove_conflicting_pci_devices(pdev, \ - "resourceN_resize"); \ - if (ret) \ - goto pm_put; \ - } \ - \ - pci_read_config_word(pdev, PCI_COMMAND, &cmd); \ - pci_write_config_word(pdev, PCI_COMMAND, \ - cmd & ~PCI_COMMAND_MEMORY); \ - \ - flags = pci_resource_flags(pdev, n); \ - \ - pci_remove_resource_files(pdev); \ - \ - for (i = 0; i < PCI_STD_NUM_BARS; i++) { \ - if (pci_resource_len(pdev, i) && \ - pci_resource_flags(pdev, i) == flags) \ - pci_release_resource(pdev, i); \ - } \ - \ - ret = pci_resize_resource(pdev, n, size); \ - \ - pci_assign_unassigned_bus_resources(pdev->bus); \ - \ - if (pci_create_resource_files(pdev)) \ - pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\ - \ - pci_write_config_word(pdev, PCI_COMMAND, cmd); \ -pm_put: \ - pci_config_pm_runtime_put(pdev); \ -unlock: \ - device_unlock(dev); \ - \ - return ret ? ret : count; \ + return __resource_resize_store(dev, n, buf, count); \ } \ static DEVICE_ATTR_RW(resource##n##_resize) @@ -1636,7 +1647,7 @@ static const struct attribute_group pcie_dev_attr_group = { .is_visible = pcie_dev_attrs_are_visible, }; -static const struct attribute_group *pci_dev_attr_groups[] = { +const struct attribute_group *pci_dev_attr_groups[] = { &pci_dev_attr_group, &pci_dev_hp_attr_group, #ifdef CONFIG_PCI_IOV @@ -1653,7 +1664,3 @@ static const struct attribute_group *pci_dev_attr_groups[] = { #endif NULL, }; - -const struct device_type pci_dev_type = { - .groups = pci_dev_attr_groups, -}; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 55bc3576a985..e5f243dd4288 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -24,7 +24,6 @@ #include <linux/log2.h> #include <linux/logic_pio.h> #include <linux/pm_wakeup.h> -#include <linux/interrupt.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> @@ -851,6 +850,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) EXPORT_SYMBOL(pci_find_resource); /** + * pci_resource_name - Return the name of the PCI resource + * @dev: PCI device to query + * @i: index of the resource + * + * Return the standard PCI resource (BAR) name according to their index. 
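Core code is expected to log resources in this uniform style, e.g. (sketch; pci_resource_name() is internal to the PCI core):

        pci_info(pdev, "%s %pR: example\n",
                 pci_resource_name(pdev, 0), &pdev->resource[0]);

which prints something like "BAR 0 [mem 0x90000000-0x9000ffff 64bit]: example".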
+ */ +const char *pci_resource_name(struct pci_dev *dev, unsigned int i) +{ + static const char * const bar_name[] = { + "BAR 0", + "BAR 1", + "BAR 2", + "BAR 3", + "BAR 4", + "BAR 5", + "ROM", +#ifdef CONFIG_PCI_IOV + "VF BAR 0", + "VF BAR 1", + "VF BAR 2", + "VF BAR 3", + "VF BAR 4", + "VF BAR 5", +#endif + "bridge window", /* "io" included in %pR */ + "bridge window", /* "mem" included in %pR */ + "bridge window", /* "mem pref" included in %pR */ + }; + static const char * const cardbus_name[] = { + "BAR 1", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#ifdef CONFIG_PCI_IOV + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#endif + "CardBus bridge window 0", /* I/O */ + "CardBus bridge window 1", /* I/O */ + "CardBus bridge window 0", /* mem */ + "CardBus bridge window 1", /* mem */ + }; + + if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS && + i < ARRAY_SIZE(cardbus_name)) + return cardbus_name[i]; + + if (i < ARRAY_SIZE(bar_name)) + return bar_name[i]; + + return "unknown"; +} + +/** * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos * @dev: the PCI device to operate on * @pos: config space offset of status word @@ -1008,6 +1067,34 @@ disable_acs_redir: } /** + * pcie_read_tlp_log - read TLP Header Log + * @dev: PCIe device + * @where: PCI Config offset of TLP Header Log + * @tlp_log: TLP Log structure to fill + * + * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC. + * + * Return: 0 on success and filled TLP Log structure, <0 on error. + */ +int pcie_read_tlp_log(struct pci_dev *dev, int where, + struct pcie_tlp_log *tlp_log) +{ + int i, ret; + + memset(tlp_log, 0, sizeof(*tlp_log)); + + for (i = 0; i < 4; i++) { + ret = pci_read_config_dword(dev, where + i * 4, + &tlp_log->dw[i]); + if (ret) + return pcibios_err_to_errno(ret); + } + + return 0; +} +EXPORT_SYMBOL_GPL(pcie_read_tlp_log); + +/** * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) * @dev: PCI device to have its BARs restored * @@ -1219,6 +1306,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) if (delay > PCI_RESET_WAIT) pci_info(dev, "ready %dms after %s\n", delay - 1, reset_type); + else + pci_dbg(dev, "ready %dms after %s\n", delay - 1, + reset_type); return 0; } @@ -1291,6 +1381,7 @@ end: /** * pci_set_full_power_state - Put a PCI device into D0 and update its state * @dev: PCI device to power up + * @locked: whether pci_bus_sem is held * * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register * to confirm the state change, restore its BARs if they might be lost and @@ -1300,7 +1391,7 @@ end: * to D0, it is more efficient to use pci_power_up() directly instead of this * function. 
*/ -static int pci_set_full_power_state(struct pci_dev *dev) +static int pci_set_full_power_state(struct pci_dev *dev, bool locked) { u16 pmcsr; int ret; @@ -1335,6 +1426,9 @@ static int pci_set_full_power_state(struct pci_dev *dev) pci_restore_bars(dev); } + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self, locked); + return 0; } @@ -1362,10 +1456,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) pci_walk_bus(bus, __pci_dev_set_current_state, &state); } +static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked) +{ + if (!bus) + return; + + if (locked) + pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state); + else + pci_walk_bus(bus, __pci_dev_set_current_state, &state); +} + /** * pci_set_low_power_state - Put a PCI device into a low-power state. * @dev: PCI device to handle. * @state: PCI power state (D1, D2, D3hot) to put the device into. + * @locked: whether pci_bus_sem is held * * Use the device's PCI_PM_CTRL register to put it into a low-power state. * @@ -1376,7 +1482,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) * 0 if device already is in the requested state. * 0 if device's power state has been successfully changed. */ -static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) +static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { u16 pmcsr; @@ -1429,27 +1535,13 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) pci_power_name(dev->current_state), pci_power_name(state)); + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self, locked); + return 0; } -/** - * pci_set_power_state - Set the power state of a PCI device - * @dev: PCI device to handle. - * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. - * - * Transition a device to a new power state, using the platform firmware and/or - * the device's PCI PM registers. - * - * RETURN VALUE: - * -EINVAL if the requested state is invalid. - * -EIO if device does not support PCI PM or its PM capabilities register has a - * wrong version, or device doesn't support the requested state. - * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. - * 0 if device already is in the requested state. - * 0 if the transition is to D3 but D3 is not supported. - * 0 if device's power state has been successfully changed. - */ -int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { int error; @@ -1473,7 +1565,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; if (state == PCI_D0) - return pci_set_full_power_state(dev); + return pci_set_full_power_state(dev, locked); /* * This device is quirked not to be put into D3, so don't put it in @@ -1487,16 +1579,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) * To put the device in D3cold, put it into D3hot in the native * way, then put it into D3cold using platform ops. 
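The @locked flag threaded through here exists so the transition can also be driven from a context that already holds pci_bus_sem; a sketch of the intended pattern using the pci_set_power_state_locked() wrapper added later in this patch:

        static int my_set_d3hot(struct pci_dev *dev, void *unused)
        {
                return pci_set_power_state_locked(dev, PCI_D3hot);
        }

        /* caller already holds pci_bus_sem */
        pci_walk_bus_locked(bus, my_set_d3hot, NULL);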
*/ - error = pci_set_low_power_state(dev, PCI_D3hot); + error = pci_set_low_power_state(dev, PCI_D3hot, locked); if (pci_platform_power_transition(dev, PCI_D3cold)) return error; /* Powering off a bridge may power off the whole hierarchy */ if (dev->current_state == PCI_D3cold) - pci_bus_set_current_state(dev->subordinate, PCI_D3cold); + __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked); } else { - error = pci_set_low_power_state(dev, state); + error = pci_set_low_power_state(dev, state, locked); if (pci_platform_power_transition(dev, state)) return error; @@ -1504,8 +1596,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; } + +/** + * pci_set_power_state - Set the power state of a PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. + * + * Transition a device to a new power state, using the platform firmware and/or + * the device's PCI PM registers. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. + * 0 if device already is in the requested state. + * 0 if the transition is to D3 but D3 is not supported. + * 0 if device's power state has been successfully changed. + */ +int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + return __pci_set_power_state(dev, state, false); +} EXPORT_SYMBOL(pci_set_power_state); +int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) +{ + lockdep_assert_held(&pci_bus_sem); + + return __pci_set_power_state(dev, state, true); +} +EXPORT_SYMBOL(pci_set_power_state_locked); + #define PCI_EXP_SAVE_REGS 7 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, @@ -1554,25 +1676,10 @@ static int pci_save_pcie_state(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); - return 0; -} - -void pci_bridge_reconfigure_ltr(struct pci_dev *dev) -{ -#ifdef CONFIG_PCIEASPM - struct pci_dev *bridge; - u32 ctl; + pci_save_aspm_l1ss_state(dev); + pci_save_ltr_state(dev); - bridge = pci_upstream_bridge(dev); - if (bridge && bridge->ltr_path) { - pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl); - if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) { - pci_dbg(bridge, "re-enabling LTR\n"); - pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_LTR_EN); - } - } -#endif + return 0; } static void pci_restore_pcie_state(struct pci_dev *dev) @@ -1581,6 +1688,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev) struct pci_cap_saved_state *save_state; u16 *cap; + /* + * Restore max latencies (in the LTR capability) before enabling + * LTR itself in PCI_EXP_DEVCTL2. 
+ */ + pci_restore_ltr_state(dev); + pci_restore_aspm_l1ss_state(dev); + save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) return; @@ -1638,46 +1752,6 @@ static void pci_restore_pcix_state(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } -static void pci_save_ltr_state(struct pci_dev *dev) -{ - int ltr; - struct pci_cap_saved_state *save_state; - u32 *cap; - - if (!pci_is_pcie(dev)) - return; - - ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); - if (!ltr) - return; - - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); - if (!save_state) { - pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); - return; - } - - /* Some broken devices only support dword access to LTR */ - cap = &save_state->cap.data[0]; - pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap); -} - -static void pci_restore_ltr_state(struct pci_dev *dev) -{ - struct pci_cap_saved_state *save_state; - int ltr; - u32 *cap; - - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); - ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); - if (!save_state || !ltr) - return; - - /* Some broken devices only support dword access to LTR */ - cap = &save_state->cap.data[0]; - pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap); -} - /** * pci_save_state - save the PCI configuration space of a device before * suspending @@ -1702,7 +1776,6 @@ int pci_save_state(struct pci_dev *dev) if (i != 0) return i; - pci_save_ltr_state(dev); pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); @@ -1803,12 +1876,6 @@ void pci_restore_state(struct pci_dev *dev) if (!dev->state_saved) return; - /* - * Restore max latencies (in the LTR capability) before enabling - * LTR itself (in the PCIe capability). - */ - pci_restore_ltr_state(dev); - pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); @@ -2089,107 +2156,6 @@ int pci_enable_device(struct pci_dev *dev) EXPORT_SYMBOL(pci_enable_device); /* - * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X - * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so - * there's no need to track it separately. pci_devres is initialized - * when a device is enabled using managed PCI device enable interface. 
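For reference, the managed API whose implementation is dropped from pci.c below is consumed like so; the probe function is illustrative:

        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int rc;

                rc = pcim_enable_device(pdev); /* auto-disabled on detach */
                if (rc)
                        return rc;

                /* no explicit pci_disable_device() in remove/error paths */
                return 0;
        }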
- */ -struct pci_devres { - unsigned int enabled:1; - unsigned int pinned:1; - unsigned int orig_intx:1; - unsigned int restore_intx:1; - unsigned int mwi:1; - u32 region_mask; -}; - -static void pcim_release(struct device *gendev, void *res) -{ - struct pci_dev *dev = to_pci_dev(gendev); - struct pci_devres *this = res; - int i; - - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) - if (this->region_mask & (1 << i)) - pci_release_region(dev, i); - - if (this->mwi) - pci_clear_mwi(dev); - - if (this->restore_intx) - pci_intx(dev, this->orig_intx); - - if (this->enabled && !this->pinned) - pci_disable_device(dev); -} - -static struct pci_devres *get_pci_dr(struct pci_dev *pdev) -{ - struct pci_devres *dr, *new_dr; - - dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); - if (dr) - return dr; - - new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); - if (!new_dr) - return NULL; - return devres_get(&pdev->dev, new_dr, NULL, NULL); -} - -static struct pci_devres *find_pci_dr(struct pci_dev *pdev) -{ - if (pci_is_managed(pdev)) - return devres_find(&pdev->dev, pcim_release, NULL, NULL); - return NULL; -} - -/** - * pcim_enable_device - Managed pci_enable_device() - * @pdev: PCI device to be initialized - * - * Managed pci_enable_device(). - */ -int pcim_enable_device(struct pci_dev *pdev) -{ - struct pci_devres *dr; - int rc; - - dr = get_pci_dr(pdev); - if (unlikely(!dr)) - return -ENOMEM; - if (dr->enabled) - return 0; - - rc = pci_enable_device(pdev); - if (!rc) { - pdev->is_managed = 1; - dr->enabled = 1; - } - return rc; -} -EXPORT_SYMBOL(pcim_enable_device); - -/** - * pcim_pin_device - Pin managed PCI device - * @pdev: PCI device to pin - * - * Pin managed PCI device @pdev. Pinned device won't be disabled on - * driver detach. @pdev must have been enabled with - * pcim_enable_device(). - */ -void pcim_pin_device(struct pci_dev *pdev) -{ - struct pci_devres *dr; - - dr = find_pci_dr(pdev); - WARN_ON(!dr || !dr->enabled); - if (dr) - dr->pinned = 1; -} -EXPORT_SYMBOL(pcim_pin_device); - -/* * pcibios_device_add - provide arch specific hooks when adding device dev * @dev: the PCI device being added * @@ -2223,17 +2189,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device(struct pci_dev *dev) {} -/** - * pcibios_penalize_isa_irq - penalize an ISA IRQ - * @irq: ISA IRQ to penalize - * @active: IRQ active or not - * - * Permits the platform to provide architecture-specific functionality when - * penalizing ISA IRQs. This is the default implementation. Architecture - * implementations can override this. - */ -void __weak pcibios_penalize_isa_irq(int irq, int active) {} - static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; @@ -2427,29 +2382,36 @@ static void pci_pme_list_scan(struct work_struct *work) if (pdev->pme_poll) { struct pci_dev *bridge = pdev->bus->self; struct device *dev = &pdev->dev; - int pm_status; + struct device *bdev = bridge ? &bridge->dev : NULL; + int bref = 0; /* - * If bridge is in low power state, the - * configuration space of subordinate devices - * may be not accessible + * If we have a bridge, it should be in an active/D0 + * state or the configuration space of subordinate + * devices may not be accessible or stable over the + * course of the call. 
*/ - if (bridge && bridge->current_state != PCI_D0) - continue; + if (bdev) { + bref = pm_runtime_get_if_active(bdev); + if (!bref) + continue; + + if (bridge->current_state != PCI_D0) + goto put_bridge; + } /* - * If the device is in a low power state it - * should not be polled either. + * The device itself should be suspended but config + * space must be accessible, therefore it cannot be in + * D3cold. */ - pm_status = pm_runtime_get_if_active(dev, true); - if (!pm_status) - continue; - - if (pdev->current_state != PCI_D3cold) + if (pm_runtime_suspended(dev) && + pdev->current_state != PCI_D3cold) pci_pme_wakeup(pdev, NULL); - if (pm_status > 0) - pm_runtime_put(dev); +put_bridge: + if (bref > 0) + pm_runtime_put(bdev); } else { list_del(&pme_dev->list); kfree(pme_dev); @@ -3290,6 +3252,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, static int pci_ea_read(struct pci_dev *dev, int offset) { struct resource *res; + const char *res_name; int ent_size, ent_offset = offset; resource_size_t start, end; unsigned long flags; @@ -3319,6 +3282,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset) goto out; res = pci_ea_get_resource(dev, bei, prop); + res_name = pci_resource_name(dev, bei); if (!res) { pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); goto out; @@ -3392,16 +3356,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) - pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei == PCI_EA_BEI_ROM) - pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", - res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) - pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei - PCI_EA_BEI_VF_BAR0, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else - pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n", bei, res, prop); out: @@ -3894,66 +3858,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) EXPORT_SYMBOL(pci_enable_atomic_ops_to_root); /** - * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge - * @dev: the PCI device - * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) - * - * Perform INTx swizzling for a device behind one level of bridge. This is - * required by section 9.1 of the PCI-to-PCI bridge specification for devices - * behind bridges on add-in cards. 
For devices with ARI enabled, the slot - * number is always 0 (see the Implementation Note in section 2.2.8.1 of - * the PCI Express Base Specification, Revision 2.1) - */ -u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) -{ - int slot; - - if (pci_ari_enabled(dev->bus)) - slot = 0; - else - slot = PCI_SLOT(dev->devfn); - - return (((pin - 1) + slot) % 4) + 1; -} - -int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) -{ - u8 pin; - - pin = dev->pin; - if (!pin) - return -1; - - while (!pci_is_root_bus(dev->bus)) { - pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; - } - *bridge = dev; - return pin; -} - -/** - * pci_common_swizzle - swizzle INTx all the way to root bridge - * @dev: the PCI device - * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD) - * - * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI - * bridges all the way up to a PCI root bus. - */ -u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) -{ - u8 pin = *pinp; - - while (!pci_is_root_bus(dev->bus)) { - pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; - } - *pinp = pin; - return PCI_SLOT(dev->devfn); -} -EXPORT_SYMBOL_GPL(pci_common_swizzle); - -/** * pci_release_region - Release a PCI bar * @pdev: PCI device whose resources were previously reserved by * pci_request_region() @@ -4249,8 +4153,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) if (res->end > IO_SPACE_LIMIT) return -EINVAL; - return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, - pgprot_device(PAGE_KERNEL)); + return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr, + pgprot_device(PAGE_KERNEL)); #else /* * This architecture does not have memory mapped I/O space, @@ -4281,133 +4185,6 @@ void pci_unmap_iospace(struct resource *res) } EXPORT_SYMBOL(pci_unmap_iospace); -static void devm_pci_unmap_iospace(struct device *dev, void *ptr) -{ - struct resource **res = ptr; - - pci_unmap_iospace(*res); -} - -/** - * devm_pci_remap_iospace - Managed pci_remap_iospace() - * @dev: Generic device to remap IO address for - * @res: Resource describing the I/O space - * @phys_addr: physical address of range to be mapped - * - * Managed pci_remap_iospace(). Map is automatically unmapped on driver - * detach. - */ -int devm_pci_remap_iospace(struct device *dev, const struct resource *res, - phys_addr_t phys_addr) -{ - const struct resource **ptr; - int error; - - ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - error = pci_remap_iospace(res, phys_addr); - if (error) { - devres_free(ptr); - } else { - *ptr = res; - devres_add(dev, ptr); - } - - return error; -} -EXPORT_SYMBOL(devm_pci_remap_iospace); - -/** - * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() - * @dev: Generic device to remap IO address for - * @offset: Resource address to map - * @size: Size of map - * - * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver - * detach. 
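
A worked instance of the swizzle formula above, for a device in slot 3 asserting INTB:

        /*
         * pin = 2 (INTB), slot = 3:
         *
         *   (((2 - 1) + 3) % 4) + 1 == 1
         *
         * so one bridge level up the interrupt appears as INTA; repeat
         * per bridge until the root bus, as pci_get_interrupt_pin() does.
         */
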
- */ -void __iomem *devm_pci_remap_cfgspace(struct device *dev, - resource_size_t offset, - resource_size_t size) -{ - void __iomem **ptr, *addr; - - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - addr = pci_remap_cfgspace(offset, size); - if (addr) { - *ptr = addr; - devres_add(dev, ptr); - } else - devres_free(ptr); - - return addr; -} -EXPORT_SYMBOL(devm_pci_remap_cfgspace); - -/** - * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource - * @dev: generic device to handle the resource for - * @res: configuration space resource to be handled - * - * Checks that a resource is a valid memory region, requests the memory - * region and ioremaps with pci_remap_cfgspace() API that ensures the - * proper PCI configuration space memory attributes are guaranteed. - * - * All operations are managed and will be undone on driver detach. - * - * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code - * on failure. Usage example:: - * - * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - * base = devm_pci_remap_cfg_resource(&pdev->dev, res); - * if (IS_ERR(base)) - * return PTR_ERR(base); - */ -void __iomem *devm_pci_remap_cfg_resource(struct device *dev, - struct resource *res) -{ - resource_size_t size; - const char *name; - void __iomem *dest_ptr; - - BUG_ON(!dev); - - if (!res || resource_type(res) != IORESOURCE_MEM) { - dev_err(dev, "invalid resource\n"); - return IOMEM_ERR_PTR(-EINVAL); - } - - size = resource_size(res); - - if (res->name) - name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev), - res->name); - else - name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); - if (!name) - return IOMEM_ERR_PTR(-ENOMEM); - - if (!devm_request_mem_region(dev, res->start, size, name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return IOMEM_ERR_PTR(-EBUSY); - } - - dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); - if (!dest_ptr) { - dev_err(dev, "ioremap failed for resource %pR\n", res); - devm_release_mem_region(dev, res->start, size); - dest_ptr = IOMEM_ERR_PTR(-ENOMEM); - } - - return dest_ptr; -} -EXPORT_SYMBOL(devm_pci_remap_cfg_resource); - static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; @@ -4558,27 +4335,6 @@ int pci_set_mwi(struct pci_dev *dev) EXPORT_SYMBOL(pci_set_mwi); /** - * pcim_set_mwi - a device-managed pci_set_mwi() - * @dev: the PCI device for which MWI is enabled - * - * Managed pci_set_mwi(). - * - * RETURNS: An appropriate -ERRNO error value on error, or zero for success. - */ -int pcim_set_mwi(struct pci_dev *dev) -{ - struct pci_devres *dr; - - dr = find_pci_dr(dev); - if (!dr) - return -ENOMEM; - - dr->mwi = 1; - return pci_set_mwi(dev); -} -EXPORT_SYMBOL(pcim_set_mwi); - -/** * pci_try_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled * @@ -4666,78 +4422,6 @@ void pci_intx(struct pci_dev *pdev, int enable) } EXPORT_SYMBOL_GPL(pci_intx); -static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) -{ - struct pci_bus *bus = dev->bus; - bool mask_updated = true; - u32 cmd_status_dword; - u16 origcmd, newcmd; - unsigned long flags; - bool irq_pending; - - /* - * We do a single dword read to retrieve both command and status. - * Document assumptions that make this possible. 
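
The documented assumptions, encoded by the BUILD_BUG_ONs that follow, are that PCI_COMMAND is dword-aligned and PCI_STATUS sits in the upper half of the same dword, so one config read fetches both 16-bit registers. Splitting the result looks roughly like this (illustrative):

        u32 cmd_status_dword;
        u16 cmd, status;

        pci_read_config_dword(dev, PCI_COMMAND, &cmd_status_dword);
        cmd    = cmd_status_dword & 0xffff;     /* PCI_COMMAND at 0x04 */
        status = cmd_status_dword >> 16;        /* PCI_STATUS  at 0x06 */
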
- */ - BUILD_BUG_ON(PCI_COMMAND % 4); - BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); - - raw_spin_lock_irqsave(&pci_lock, flags); - - bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); - - irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; - - /* - * Check interrupt status register to see whether our device - * triggered the interrupt (when masking) or the next IRQ is - * already pending (when unmasking). - */ - if (mask != irq_pending) { - mask_updated = false; - goto done; - } - - origcmd = cmd_status_dword; - newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; - if (mask) - newcmd |= PCI_COMMAND_INTX_DISABLE; - if (newcmd != origcmd) - bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); - -done: - raw_spin_unlock_irqrestore(&pci_lock, flags); - - return mask_updated; -} - -/** - * pci_check_and_mask_intx - mask INTx on pending interrupt - * @dev: the PCI device to operate on - * - * Check if the device dev has its INTx line asserted, mask it and return - * true in that case. False is returned if no interrupt was pending. - */ -bool pci_check_and_mask_intx(struct pci_dev *dev) -{ - return pci_check_and_set_intx_mask(dev, true); -} -EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); - -/** - * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending - * @dev: the PCI device to operate on - * - * Check if the device dev has its INTx line asserted, unmask it if not and - * return true. False is returned and the mask remains active if there was - * still an interrupt pending. - */ -bool pci_check_and_unmask_intx(struct pci_dev *dev) -{ - return pci_check_and_set_intx_mask(dev, false); -} -EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); - /** * pci_wait_for_pending_transaction - wait for pending transaction * @dev: the PCI device to operate on @@ -6224,6 +5908,41 @@ int pcie_set_mps(struct pci_dev *dev, int mps) } EXPORT_SYMBOL(pcie_set_mps); +static enum pci_bus_speed to_pcie_link_speed(u16 lnksta) +{ + return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)]; +} + +int pcie_link_speed_mbps(struct pci_dev *pdev) +{ + u16 lnksta; + int err; + + err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if (err) + return err; + + switch (to_pcie_link_speed(lnksta)) { + case PCIE_SPEED_2_5GT: + return 2500; + case PCIE_SPEED_5_0GT: + return 5000; + case PCIE_SPEED_8_0GT: + return 8000; + case PCIE_SPEED_16_0GT: + return 16000; + case PCIE_SPEED_32_0GT: + return 32000; + case PCIE_SPEED_64_0GT: + return 64000; + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(pcie_link_speed_mbps); + /** * pcie_bandwidth_available - determine minimum link settings of a PCIe * device and its bandwidth limitation @@ -6257,8 +5976,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, while (dev) { pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, - lnksta)]; + next_speed = to_pcie_link_speed(lnksta); next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); @@ -6689,14 +6407,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, resource_size_t align, bool resize) { struct resource *r = &dev->resource[bar]; + const char *r_name = pci_resource_name(dev, bar); resource_size_t size; if (!(r->flags & IORESOURCE_MEM)) return; if (r->flags & IORESOURCE_PCI_FIXED) { - pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n", - bar, r, (unsigned long long)align); + pci_info(dev, "%s %pR: 
ignoring requested alignment %#llx\n", + r_name, r, (unsigned long long)align); return; } @@ -6732,8 +6451,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, * devices and we use the second. */ - pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n", - bar, r, (unsigned long long)align); + pci_info(dev, "%s %pR: requesting alignment to %#llx\n", + r_name, r, (unsigned long long)align); if (resize) { r->start = 0; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5ecbcf041179..17fed1846847 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -31,9 +31,6 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ -int pci_create_sysfs_dev_files(struct pci_dev *pdev); -void pci_remove_sysfs_dev_files(struct pci_dev *pdev); -void pci_cleanup_rom(struct pci_dev *dev); #ifdef CONFIG_DMI extern const struct attribute_group pci_dev_smbios_attr_group; #endif @@ -97,7 +94,6 @@ void pci_msi_init(struct pci_dev *dev); void pci_msix_init(struct pci_dev *dev); bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); -void pci_bridge_reconfigure_ltr(struct pci_dev *dev); int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type); static inline void pci_wakeup_event(struct pci_dev *dev) @@ -152,7 +148,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } /* Functions for PCI Hotplug drivers to use */ int pci_hp_add_bridge(struct pci_dev *dev); -#ifdef HAVE_PCI_LEGACY +#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY) void pci_create_legacy_files(struct pci_bus *bus); void pci_remove_legacy_files(struct pci_bus *bus); #else @@ -185,10 +181,22 @@ static inline int pci_no_d1d2(struct pci_dev *dev) return (dev->no_d1d2 || parent_dstates); } + +#ifdef CONFIG_SYSFS +int pci_create_sysfs_dev_files(struct pci_dev *pdev); +void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern const struct attribute_group *pci_dev_groups[]; +extern const struct attribute_group *pci_dev_attr_groups[]; extern const struct attribute_group *pcibus_groups[]; -extern const struct device_type pci_dev_type; extern const struct attribute_group *pci_bus_groups[]; +#else +static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; } +static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { } +#define pci_dev_groups NULL +#define pci_dev_attr_groups NULL +#define pcibus_groups NULL +#define pci_bus_groups NULL +#endif extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mmio_size; @@ -255,6 +263,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); +const char *pci_resource_name(struct pci_dev *dev, unsigned int i); + void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); struct pci_bus *pci_bus_get(struct pci_bus *bus); @@ -272,7 +282,7 @@ void pci_bus_put(struct pci_bus *bus); /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ - ((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \ + ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \ (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \ (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ (speed) == PCIE_SPEED_8_0GT ? 
8000*128/130 : \ @@ -366,11 +376,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) return 0; } -static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) -{ - return dev->error_state == pci_channel_io_perm_failure; -} - /* pci_dev priv_flags */ #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 @@ -407,7 +412,7 @@ struct aer_err_info { unsigned int status; /* COR/UNCOR Error Status */ unsigned int mask; /* COR/UNCOR Error Mask */ - struct aer_header_log_regs tlp; /* TLP Header */ + struct pcie_tlp_log tlp; /* TLP Header */ }; int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info); @@ -566,14 +571,28 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, bool pcie_wait_for_link(struct pci_dev *pdev, bool active); int pcie_retrain_link(struct pci_dev *pdev, bool use_lt); + +/* ASPM-related functionality we need even without CONFIG_PCIEASPM */ +void pci_save_ltr_state(struct pci_dev *dev); +void pci_restore_ltr_state(struct pci_dev *dev); +void pci_configure_aspm_l1ss(struct pci_dev *dev); +void pci_save_aspm_l1ss_state(struct pci_dev *dev); +void pci_restore_aspm_l1ss_state(struct pci_dev *dev); + #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); +void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); +void pci_configure_ltr(struct pci_dev *pdev); +void pci_bridge_reconfigure_ltr(struct pci_dev *pdev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } +static inline void pci_configure_ltr(struct pci_dev *pdev) { } +static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { } #endif #ifdef CONFIG_PCIE_ECRC @@ -794,6 +813,27 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) #endif /* + * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X + * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so + * there's no need to track it separately. pci_devres is initialized + * when a device is enabled using managed PCI device enable interface. + * + * TODO: Struct pci_devres and find_pci_dr() only need to be here because + * they're used in pci.c. Port or move these functions to devres.c and + * then remove them from here. + */ +struct pci_devres { + unsigned int enabled:1; + unsigned int pinned:1; + unsigned int orig_intx:1; + unsigned int restore_intx:1; + unsigned int mwi:1; + u32 region_mask; +}; + +struct pci_devres *find_pci_dr(struct pci_dev *pdev); + +/* * Config Address for PCI Configuration Mechanism #1 * * See PCI Local Bus Specification, Revision 3.0, diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 228652a59f27..8999fcebde6a 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -49,6 +49,15 @@ config PCIEAER_INJECT gotten from: https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/ +config PCIEAER_CXL + bool "PCI Express CXL RAS support" + default y + depends on PCIEAER && CXL_PCI + help + Enables CXL error handling. + + If unsure, say Y. 
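
On the pci.c side above, the new pcie_link_speed_mbps() reports raw signaling rates, while PCIE_SPEED2MBS_ENC folds in encoding overhead: 8b/10b costs 20% (2.5 and 5 GT/s), 128b/130b about 1.5% (8 through 32 GT/s), and the macro treats 64 GT/s flit mode as overhead-free, hence 64000*1/1. A usage sketch for the helper, with pdev assumed to be a valid PCIe device:

        int mbps = pcie_link_speed_mbps(pdev);

        if (mbps < 0)
                pci_warn(pdev, "cannot determine link speed: %d\n", mbps);
        else
                pci_info(pdev, "raw link rate: %d Mb/s per lane\n", mbps);
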
+ # # PCI Express ECRC # diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 8de4ed5f98f1..6461aa93fe76 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -6,7 +6,7 @@ pcieportdrv-y := portdrv.o rcec.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o -obj-$(CONFIG_PCIEASPM) += aspm.o +obj-y += aspm.o obj-$(CONFIG_PCIEAER) += aer.o err.o obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o obj-$(CONFIG_PCIE_PME) += pme.o diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index dcd35993004e..ac6293c24976 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -41,8 +41,8 @@ #define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/ struct aer_err_source { - unsigned int status; - unsigned int id; + u32 status; /* PCI_ERR_ROOT_STATUS */ + u32 id; /* PCI_ERR_ROOT_ERR_SRC */ }; struct aer_rpc { @@ -435,10 +435,10 @@ void pci_aer_exit(struct pci_dev *dev) /* * AER error strings */ -static const char *aer_error_severity_string[] = { - "Uncorrected (Non-Fatal)", - "Uncorrected (Fatal)", - "Corrected" +static const char * const aer_error_severity_string[] = { + "Uncorrectable (Non-Fatal)", + "Uncorrectable (Fatal)", + "Correctable" }; static const char *aer_error_layer[] = { @@ -664,11 +664,10 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev, } } -static void __print_tlp_header(struct pci_dev *dev, - struct aer_header_log_regs *t) +static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t) { pci_err(dev, " TLP Header: %08x %08x %08x %08x\n", - t->dw0, t->dw1, t->dw2, t->dw3); + t->dw[0], t->dw[1], t->dw[2], t->dw[3]); } static void __aer_print_error(struct pci_dev *dev, @@ -740,7 +739,7 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) u8 bus = info->id >> 8; u8 devfn = info->id & 0xff; - pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n", + pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n", info->multi_error_valid ? "Multiple " : "", aer_error_severity_string[info->severity], pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), @@ -760,9 +759,10 @@ int cper_severity_to_aer(int cper_severity) } } EXPORT_SYMBOL_GPL(cper_severity_to_aer); +#endif -void cper_print_aer(struct pci_dev *dev, int aer_severity, - struct aer_capability_regs *aer) +void pci_print_aer(struct pci_dev *dev, int aer_severity, + struct aer_capability_regs *aer) { int layer, agent, tlp_header_valid = 0; u32 status, mask; @@ -801,7 +801,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity, trace_aer_event(dev_name(&dev->dev), (status & ~mask), aer_severity, tlp_header_valid, &aer->header_log); } -#endif +EXPORT_SYMBOL_NS_GPL(pci_print_aer, CXL); /** * add_error_device - list device to be handled @@ -928,20 +928,164 @@ static bool find_source_device(struct pci_dev *parent, pci_walk_bus(parent->subordinate, find_device_iter, e_info); if (!e_info->error_dev_num) { - pci_info(parent, "can't find device of ID%04x\n", e_info->id); + u8 bus = e_info->id >> 8; + u8 devfn = e_info->id & 0xff; + + pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n", + pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); return false; } return true; } +#ifdef CONFIG_PCIEAER_CXL + +/** + * pci_aer_unmask_internal_errors - unmask internal errors + * @dev: pointer to the pcie_dev data structure + * + * Unmasks internal errors in the Uncorrectable and Correctable Error + * Mask registers. 
+ * + * Note: AER must be enabled and supported by the device which must be + * checked in advance, e.g. with pcie_aer_is_native(). + */ +static void pci_aer_unmask_internal_errors(struct pci_dev *dev) +{ + int aer = dev->aer_cap; + u32 mask; + + pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask); + mask &= ~PCI_ERR_UNC_INTN; + pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask); + + pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask); + mask &= ~PCI_ERR_COR_INTERNAL; + pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, mask); +} + +static bool is_cxl_mem_dev(struct pci_dev *dev) +{ + /* + * The capability, status, and control fields in Device 0, + * Function 0 DVSEC control the CXL functionality of the + * entire device (CXL 3.0, 8.1.3). + */ + if (dev->devfn != PCI_DEVFN(0, 0)) + return false; + + /* + * CXL Memory Devices must have the 502h class code set (CXL + * 3.0, 8.1.12.1). + */ + if ((dev->class >> 8) != PCI_CLASS_MEMORY_CXL) + return false; + + return true; +} + +static bool cxl_error_is_native(struct pci_dev *dev) +{ + struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); + + return (pcie_ports_native || host->native_aer); +} + +static bool is_internal_error(struct aer_err_info *info) +{ + if (info->severity == AER_CORRECTABLE) + return info->status & PCI_ERR_COR_INTERNAL; + + return info->status & PCI_ERR_UNC_INTN; +} + +static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data) +{ + struct aer_err_info *info = (struct aer_err_info *)data; + const struct pci_error_handlers *err_handler; + + if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev)) + return 0; + + /* protect dev->driver */ + device_lock(&dev->dev); + + err_handler = dev->driver ? dev->driver->err_handler : NULL; + if (!err_handler) + goto out; + + if (info->severity == AER_CORRECTABLE) { + if (err_handler->cor_error_detected) + err_handler->cor_error_detected(dev); + } else if (err_handler->error_detected) { + if (info->severity == AER_NONFATAL) + err_handler->error_detected(dev, pci_channel_io_normal); + else if (info->severity == AER_FATAL) + err_handler->error_detected(dev, pci_channel_io_frozen); + } +out: + device_unlock(&dev->dev); + return 0; +} + +static void cxl_rch_handle_error(struct pci_dev *dev, struct aer_err_info *info) +{ + /* + * Internal errors of an RCEC indicate an AER error in an + * RCH's downstream port. Check and handle them in the CXL.mem + * device driver. 
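
The class test in is_cxl_mem_dev() above relies on the standard class-code layout (PCI_CLASS_MEMORY_CXL is 0x0502 in pci_ids.h):

        /*
         * dev->class packs:  bits 23..16 base class (0x05, memory),
         *                    bits 15..8  subclass   (0x02, CXL memory),
         *                    bits  7..0  programming interface
         *
         * (dev->class >> 8) == PCI_CLASS_MEMORY_CXL therefore matches
         * 0502xxh devices regardless of prog-if.
         */
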
+ */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC && + is_internal_error(info)) + pcie_walk_rcec(dev, cxl_rch_handle_error_iter, info); +} + +static int handles_cxl_error_iter(struct pci_dev *dev, void *data) +{ + bool *handles_cxl = data; + + if (!*handles_cxl) + *handles_cxl = is_cxl_mem_dev(dev) && cxl_error_is_native(dev); + + /* Non-zero terminates iteration */ + return *handles_cxl; +} + +static bool handles_cxl_errors(struct pci_dev *rcec) +{ + bool handles_cxl = false; + + if (pci_pcie_type(rcec) == PCI_EXP_TYPE_RC_EC && + pcie_aer_is_native(rcec)) + pcie_walk_rcec(rcec, handles_cxl_error_iter, &handles_cxl); + + return handles_cxl; +} + +static void cxl_rch_enable_rcec(struct pci_dev *rcec) +{ + if (!handles_cxl_errors(rcec)) + return; + + pci_aer_unmask_internal_errors(rcec); + pci_info(rcec, "CXL: Internal errors unmasked"); +} + +#else +static inline void cxl_rch_enable_rcec(struct pci_dev *dev) { } +static inline void cxl_rch_handle_error(struct pci_dev *dev, + struct aer_err_info *info) { } +#endif + /** - * handle_error_source - handle logging error into an event log + * pci_aer_handle_error - handle logging error into an event log * @dev: pointer to pci_dev data structure of error source device * @info: comprehensive error information * * Invoked when an error being detected by Root Port. */ -static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) +static void pci_aer_handle_error(struct pci_dev *dev, struct aer_err_info *info) { int aer = dev->aer_cap; @@ -965,6 +1109,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset); else if (info->severity == AER_FATAL) pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset); +} + +static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) +{ + cxl_rch_handle_error(dev, info); + pci_aer_handle_error(dev, info); pci_dev_put(dev); } @@ -997,7 +1147,7 @@ static void aer_recover_work_func(struct work_struct *work) PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); continue; } - cper_print_aer(pdev, entry.severity, entry.regs); + pci_print_aer(pdev, entry.severity, entry.regs); /* * Memory for aer_capability_regs(entry.regs) is being allocated from the * ghes_estatus_pool to protect it from overwriting when multiple sections @@ -1059,7 +1209,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { int type = pci_pcie_type(dev); int aer = dev->aer_cap; - int temp; + u32 aercc; /* Must reset in this function */ info->status = 0; @@ -1090,19 +1240,12 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) return 0; /* Get First Error Pointer */ - pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp); - info->first_error = PCI_ERR_CAP_FEP(temp); + pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc); + info->first_error = PCI_ERR_CAP_FEP(aercc); if (info->status & AER_LOG_TLP_MASKS) { info->tlp_header_valid = 1; - pci_read_config_dword(dev, - aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0); - pci_read_config_dword(dev, - aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1); - pci_read_config_dword(dev, - aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2); - pci_read_config_dword(dev, - aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3); + pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp); } } @@ -1348,6 +1491,7 @@ static int aer_probe(struct pcie_device *dev) return status; } + cxl_rch_enable_rcec(port); aer_enable_rootport(rpc); pci_info(port, "enabled with IRQ 
%d\n", dev->irq); return 0; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 50b04ae5c394..47761c7ef267 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -24,6 +24,166 @@ #include "../pci.h" +void pci_save_ltr_state(struct pci_dev *dev) +{ + int ltr; + struct pci_cap_saved_state *save_state; + u32 *cap; + + if (!pci_is_pcie(dev)) + return; + + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!ltr) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state) { + pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); + return; + } + + /* Some broken devices only support dword access to LTR */ + cap = &save_state->cap.data[0]; + pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap); +} + +void pci_restore_ltr_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + int ltr; + u32 *cap; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state || !ltr) + return; + + /* Some broken devices only support dword access to LTR */ + cap = &save_state->cap.data[0]; + pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap); +} + +void pci_configure_aspm_l1ss(struct pci_dev *pdev) +{ + int rc; + + pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + + rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS, + 2 * sizeof(u32)); + if (rc) + pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n", + ERR_PTR(rc)); +} + +void pci_save_aspm_l1ss_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *save_state; + u16 l1ss = pdev->l1ss; + u32 *cap; + + /* + * Save L1 substate configuration. The ASPM L0s/L1 configuration + * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state(). + */ + if (!l1ss) + return; + + save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS); + if (!save_state) + return; + + cap = &save_state->cap.data[0]; + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++); + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++); +} + +void pci_restore_aspm_l1ss_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *pl_save_state, *cl_save_state; + struct pci_dev *parent = pdev->bus->self; + u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable; + u32 cl_ctl1, cl_ctl2, cl_l1_2_enable; + u16 clnkctl, plnkctl; + + /* + * In case BIOS enabled L1.2 when resuming, we need to disable it first + * on the downstream component before the upstream. So, don't attempt to + * restore either until we are at the downstream component. 
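
The single-u32 save and restore in pci_save_ltr_state()/pci_restore_ltr_state() above works because the two 16-bit latency registers are adjacent in the LTR capability:

        /*
         * PCI_LTR_MAX_SNOOP_LAT   = 0x4  (16 bits)
         * PCI_LTR_MAX_NOSNOOP_LAT = 0x6  (16 bits)
         *
         * one aligned dword at offset 0x4 covers both, which also suits
         * the broken devices that only accept dword accesses.
         */
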
+ */ + if (pcie_downstream_port(pdev) || !parent) + return; + + if (!pdev->l1ss || !parent->l1ss) + return; + + cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS); + pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS); + if (!cl_save_state || !pl_save_state) + return; + + cap = &cl_save_state->cap.data[0]; + cl_ctl2 = *cap++; + cl_ctl1 = *cap; + cap = &pl_save_state->cap.data[0]; + pl_ctl2 = *cap++; + pl_ctl1 = *cap; + + /* Make sure L0s/L1 are disabled before updating L1SS config */ + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl); + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) { + pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, + clnkctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, + plnkctl & ~PCI_EXP_LNKCTL_ASPMC); + } + + /* + * Disable L1.2 on this downstream endpoint device first, followed + * by the upstream + */ + pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + + /* + * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD + * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2 + * enable bits, even though they're all in PCI_L1SS_CTL1. + */ + pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK; + pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK; + cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK; + cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK; + + /* Write back without enables first (above we cleared them in ctl1) */ + pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2); + pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2); + pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1); + pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1); + + /* Then write back the enables */ + if (pl_l1_2_enable || cl_l1_2_enable) { + pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + pl_ctl1 | pl_l1_2_enable); + pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, + cl_ctl1 | cl_l1_2_enable); + } + + /* Restore L0s/L1 if they were enabled */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) { + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl); + pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl); + } +} + +#ifdef CONFIG_PCIEASPM + #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif @@ -141,16 +301,42 @@ static int policy_to_clkpm_state(struct pcie_link_state *link) return 0; } +static void pci_update_aspm_saved_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + u16 *cap, lnkctl, aspm_ctl; + + save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); + if (!save_state) + return; + + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl); + + /* + * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only + * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't + * change after being captured in save_state. 
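
The cap[1] indexing in the function body below mirrors the word order written by pci_save_pcie_state(); for reference (u16 entries, in saving order):

        /*
         * cap[0] PCI_EXP_DEVCTL    cap[4] PCI_EXP_DEVCTL2
         * cap[1] PCI_EXP_LNKCTL    cap[5] PCI_EXP_LNKCTL2
         * cap[2] PCI_EXP_SLTCTL    cap[6] PCI_EXP_SLTCTL2
         * cap[3] PCI_EXP_RTCTL
         */
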
+ */ + aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN); + lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN); + + /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */ + cap = (u16 *)&save_state->cap.data[0]; + cap[1] = lnkctl | aspm_ctl; +} + static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) { struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0; - list_for_each_entry(child, &linkbus->devices, bus_list) + list_for_each_entry(child, &linkbus->devices, bus_list) { pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN, val); + pci_update_aspm_saved_state(child); + } link->clkpm_enabled = !!enable; } @@ -426,17 +612,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -501,10 +676,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -512,22 +689,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -687,10 +868,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, 
parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). @@ -713,10 +894,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) @@ -774,6 +955,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; + + /* Update latest ASPM configuration in saved context */ + pci_save_aspm_l1ss_state(link->downstream); + pci_update_aspm_saved_state(link->downstream); + pci_save_aspm_l1ss_state(parent); + pci_update_aspm_saved_state(parent); } static void pcie_config_aspm_path(struct pcie_link_state *link) @@ -943,6 +1130,78 @@ out: up_read(&pci_bus_sem); } +void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + u32 ctl; + + bridge = pci_upstream_bridge(pdev); + if (bridge && bridge->ltr_path) { + pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl); + if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) { + pci_dbg(bridge, "re-enabling LTR\n"); + pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_LTR_EN); + } + } +} + +void pci_configure_ltr(struct pci_dev *pdev) +{ + struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus); + struct pci_dev *bridge; + u32 cap, ctl; + + if (!pci_is_pcie(pdev)) + return; + + pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap); + if (!(cap & PCI_EXP_DEVCAP2_LTR)) + return; + + pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl); + if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) { + pdev->ltr_path = 1; + return; + } + + bridge = pci_upstream_bridge(pdev); + if (bridge && bridge->ltr_path) + pdev->ltr_path = 1; + + return; + } + + if (!host->native_ltr) + return; + + /* + * Software must not enable LTR in an Endpoint unless the Root + * Complex and all intermediate Switches indicate support for LTR. + * PCIe r4.0, sec 6.18. + */ + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) { + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_LTR_EN); + pdev->ltr_path = 1; + return; + } + + /* + * If we're configuring a hot-added device, LTR was likely + * disabled in the upstream bridge, so re-enable it before enabling + * it in the new device. 
+ */ + bridge = pci_upstream_bridge(pdev); + if (bridge && bridge->ltr_path) { + pci_bridge_reconfigure_ltr(pdev); + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_LTR_EN); + pdev->ltr_path = 1; + } +} + /* Recheck latencies and update aspm_capable for links under the root */ static void pcie_update_aspm_capable(struct pcie_link_state *root) { @@ -1008,6 +1267,30 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) up_read(&pci_bus_sem); } +/* + * @pdev: the root port or switch downstream port + * @locked: whether pci_bus_sem is held + */ +void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) +{ + struct pcie_link_state *link = pdev->link_state; + + if (aspm_disabled || !link) + return; + /* + * Devices changed PM state, we should recheck if latency + * meets all functions' requirement + */ + if (!locked) + down_read(&pci_bus_sem); + mutex_lock(&aspm_lock); + pcie_update_aspm_capable(link->root); + pcie_config_aspm_path(link); + mutex_unlock(&aspm_lock); + if (!locked) + up_read(&pci_bus_sem); +} + void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; @@ -1041,7 +1324,7 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev) return bridge->link_state; } -static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) +static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked) { struct pcie_link_state *link = pcie_aspm_get_link(pdev); @@ -1060,7 +1343,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) return -EPERM; } - if (sem) + if (!locked) down_read(&pci_bus_sem); mutex_lock(&aspm_lock); if (state & PCIE_LINK_STATE_L0S) @@ -1082,7 +1365,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) link->clkpm_disable = 1; pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); - if (sem) + if (!locked) up_read(&pci_bus_sem); return 0; @@ -1090,7 +1373,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) int pci_disable_link_state_locked(struct pci_dev *pdev, int state) { - return __pci_disable_link_state(pdev, state, false); + lockdep_assert_held_read(&pci_bus_sem); + + return __pci_disable_link_state(pdev, state, true); } EXPORT_SYMBOL(pci_disable_link_state_locked); @@ -1105,21 +1390,11 @@ EXPORT_SYMBOL(pci_disable_link_state_locked); */ int pci_disable_link_state(struct pci_dev *pdev, int state) { - return __pci_disable_link_state(pdev, state, true); + return __pci_disable_link_state(pdev, state, false); } EXPORT_SYMBOL(pci_disable_link_state); -/** - * pci_enable_link_state - Clear and set the default device link state so that - * the link may be allowed to enter the specified states. Note that if the - * BIOS didn't grant ASPM control to the OS, this does nothing because we can't - * touch the LNKCTL register. Also note that this does not enable states - * disabled by pci_disable_link_state(). Return 0 or a negative errno. 
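
With the _locked variants asserting pci_bus_sem instead of taking it, the expected caller pattern is (a sketch; pci_enable_link_state_locked() is added further down):

        down_read(&pci_bus_sem);
        pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
        pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_L1_1);
        up_read(&pci_bus_sem);
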
- * - * @pdev: PCI device - * @state: Mask of ASPM link states to enable - */ -int pci_enable_link_state(struct pci_dev *pdev, int state) +static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked) { struct pcie_link_state *link = pcie_aspm_get_link(pdev); @@ -1136,7 +1411,8 @@ int pci_enable_link_state(struct pci_dev *pdev, int state) return -EPERM; } - down_read(&pci_bus_sem); + if (!locked) + down_read(&pci_bus_sem); mutex_lock(&aspm_lock); link->aspm_default = 0; if (state & PCIE_LINK_STATE_L0S) @@ -1157,12 +1433,48 @@ int pci_enable_link_state(struct pci_dev *pdev, int state) link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0; pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); - up_read(&pci_bus_sem); + if (!locked) + up_read(&pci_bus_sem); return 0; } + +/** + * pci_enable_link_state - Clear and set the default device link state so that + * the link may be allowed to enter the specified states. Note that if the + * BIOS didn't grant ASPM control to the OS, this does nothing because we can't + * touch the LNKCTL register. Also note that this does not enable states + * disabled by pci_disable_link_state(). Return 0 or a negative errno. + * + * @pdev: PCI device + * @state: Mask of ASPM link states to enable + */ +int pci_enable_link_state(struct pci_dev *pdev, int state) +{ + return __pci_enable_link_state(pdev, state, false); +} EXPORT_SYMBOL(pci_enable_link_state); +/** + * pci_enable_link_state_locked - Clear and set the default device link state + * so that the link may be allowed to enter the specified states. Note that if + * the BIOS didn't grant ASPM control to the OS, this does nothing because we + * can't touch the LNKCTL register. Also note that this does not enable states + * disabled by pci_disable_link_state(). Return 0 or a negative errno. + * + * @pdev: PCI device + * @state: Mask of ASPM link states to enable + * + * Context: Caller holds pci_bus_sem read lock. 
+ */ +int pci_enable_link_state_locked(struct pci_dev *pdev, int state) +{ + lockdep_assert_held_read(&pci_bus_sem); + + return __pci_enable_link_state(pdev, state, true); +} +EXPORT_SYMBOL(pci_enable_link_state_locked); + static int pcie_aspm_set_policy(const char *val, const struct kernel_param *kp) { @@ -1399,3 +1711,5 @@ bool pcie_aspm_support_enabled(void) { return aspm_support_enabled; } + +#endif /* CONFIG_PCIEASPM */ diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index 94111e438241..a668820696dc 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -190,7 +190,8 @@ out: static void dpc_process_rp_pio_error(struct pci_dev *pdev) { u16 cap = pdev->dpc_cap, dpc_status, first_error; - u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; + u32 status, mask, sev, syserr, exc, log, prefix; + struct pcie_tlp_log tlp_log; int i; pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status); @@ -216,16 +217,9 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev) if (pdev->dpc_rp_log_size < 4) goto clear_status; - pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, - &dw0); - pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4, - &dw1); - pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8, - &dw2); - pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, - &dw3); + pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log); pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n", - dw0, dw1, dw2, dw3); + tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]); if (pdev->dpc_rp_log_size < 5) goto clear_status; @@ -234,7 +228,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev) for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) { pci_read_config_dword(pdev, - cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); + cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix); pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); } clear_status: @@ -303,10 +297,70 @@ void dpc_process_error(struct pci_dev *pdev) } } +static void pci_clear_surpdn_errors(struct pci_dev *pdev) +{ + if (pdev->dpc_rp_extensions) + pci_write_config_dword(pdev, pdev->dpc_cap + + PCI_EXP_DPC_RP_PIO_STATUS, ~0); + + /* + * In practice, Surprise Down errors have been observed to also set + * error bits in the Status Register as well as the Fatal Error + * Detected bit in the Device Status Register. 
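
pcie_read_tlp_log() and struct pcie_tlp_log replace the four open-coded header-log reads here and in aer.c, and the prefix-log loop above is fixed to step a dword per entry (... + i * 4). The helper itself is introduced elsewhere in the series; its job amounts to something like this sketch, assuming pcie_tlp_log carries u32 dw[4]:

        int pcie_read_tlp_log(struct pci_dev *dev, int where,
                              struct pcie_tlp_log *log)
        {
                int i, ret;

                for (i = 0; i < 4; i++) {
                        ret = pci_read_config_dword(dev, where + i * 4,
                                                    &log->dw[i]);
                        if (ret)
                                return pcibios_err_to_errno(ret);
                }

                return 0;
        }
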
+ */ + pci_write_config_word(pdev, PCI_STATUS, 0xffff); + + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED); +} + +static void dpc_handle_surprise_removal(struct pci_dev *pdev) +{ + if (!pcie_wait_for_link(pdev, false)) { + pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n"); + goto out; + } + + if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) + goto out; + + pci_aer_raw_clear_status(pdev); + pci_clear_surpdn_errors(pdev); + + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, + PCI_EXP_DPC_STATUS_TRIGGER); + +out: + clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags); + wake_up_all(&dpc_completed_waitqueue); +} + +static bool dpc_is_surprise_removal(struct pci_dev *pdev) +{ + u16 status; + + if (!pdev->is_hotplug_bridge) + return false; + + if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS, + &status)) + return false; + + return status & PCI_ERR_UNC_SURPDN; +} + static irqreturn_t dpc_handler(int irq, void *context) { struct pci_dev *pdev = context; + /* + * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect + * of async removal and should be ignored by software. + */ + if (dpc_is_surprise_removal(pdev)) { + dpc_handle_surprise_removal(pdev); + return IRQ_HANDLED; + } + dpc_process_error(pdev); /* We configure DPC so it only triggers on ERR_FATAL */ diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 59c90d04a609..705893b5f7b0 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -13,6 +13,7 @@ #define dev_fmt(fmt) "AER: " fmt #include <linux/pci.h> +#include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev, return 0; } +static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data) +{ + pm_runtime_get_sync(&pdev->dev); + return 0; +} + +static int pci_pm_runtime_put(struct pci_dev *pdev, void *data) +{ + pm_runtime_put(&pdev->dev); + return 0; +} + static int report_frozen_detected(struct pci_dev *dev, void *data) { return report_error_detected(dev, pci_channel_io_frozen, data); @@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, else bridge = pci_upstream_bridge(dev); + pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL); + pci_dbg(bridge, "broadcast error_detected message\n"); if (state == pci_channel_io_frozen) { pci_walk_bridge(bridge, report_frozen_detected, &status); @@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pcie_clear_device_status(dev); pci_aer_clear_nonfatal_status(dev); } + + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_info(bridge, "device recovery successful\n"); return status; failed: + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); /* TODO: Should kernel panic here? 
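
The pm_runtime walks added to pcie_do_recovery() bracket the whole sequence, presumably to keep every device in the subtree runtime-resumed, and hence its config space reachable, while the handler callbacks run; schematically:

        pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL); /* resume subtree */
        /* ... error_detected, link reset, mmio_enabled, resume ... */
        pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);      /* drop references */
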
*/ diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 1f3803bde7ee..12c89ea0313b 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -96,7 +96,7 @@ struct pcie_port_service_driver { int pcie_port_service_register(struct pcie_port_service_driver *new); void pcie_port_service_unregister(struct pcie_port_service_driver *new); -extern struct bus_type pcie_port_bus_type; +extern const struct bus_type pcie_port_bus_type; struct pci_dev; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ed6b7f48736a..1325fbae2f28 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -180,6 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u64 l64, sz64, mask64; u16 orig_cmd; struct pci_bus_region region, inverted_region; + const char *res_name = pci_resource_name(dev, res - dev->resource); mask = type ? PCI_ROM_ADDRESS_MASK : ~0; @@ -254,8 +255,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, sz64 = pci_size(l64, sz64, mask64); if (!sz64) { - pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n", - pos); + pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name); goto fail; } @@ -265,8 +265,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; res->start = 0; res->end = 0; - pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", - pos, (unsigned long long)sz64); + pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n", + res_name, (unsigned long long)sz64); goto out; } @@ -275,8 +275,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = sz64 - 1; - pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", - pos, (unsigned long long)l64); + pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n", + res_name, (unsigned long long)l64); goto out; } } @@ -302,8 +302,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = region.end - region.start; - pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n", - pos, (unsigned long long)region.start); + pci_info(dev, "%s: initial BAR value %#010llx invalid\n", + res_name, (unsigned long long)region.start); } goto out; @@ -313,7 +313,7 @@ fail: res->flags = 0; out: if (res->flags) - pci_info(dev, "reg 0x%x: %pR\n", pos, res); + pci_info(dev, "%s %pR\n", res_name, res); return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; } @@ -344,64 +344,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } -static void pci_read_bridge_windows(struct pci_dev *bridge) +static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res, + bool log) { - u16 io; - u32 pmem, tmp; - - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) - bridge->io_window = 1; - - /* - * DECchip 21050 pass 2 errata: the bridge may miss an address - * disconnect boundary by one PCI data phase. Workaround: do not - * use prefetching on this device. 
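
A worked decode for pci_read_bridge_io(), whose body follows below, using illustrative register values on a bridge with standard 16-bit I/O decode (granularity 0x1000):

        /*
         * io_base_lo = 0x21, io_limit_lo = 0x31:
         *
         *   base  = (0x21 & 0xf0) << 8              = 0x2000
         *   limit = ((0x31 & 0xf0) << 8) + 0xfff    = 0x3fff
         *
         * yielding a bridge window of [io 0x2000-0x3fff].
         */
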
- */ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (!pmem) - return; - - bridge->pref_window = 1; - - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { - - /* - * Bridge claims to have a 64-bit prefetchable memory - * window; verify that the upper bits are actually - * writable. - */ - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); - if (tmp) - bridge->pref_64_window = 1; - } -} - -static void pci_read_bridge_io(struct pci_bus *child) -{ - struct pci_dev *dev = child->self; u8 io_base_lo, io_limit_lo; unsigned long io_mask, io_granularity, base, limit; struct pci_bus_region region; - struct resource *res; io_mask = PCI_IO_RANGE_MASK; io_granularity = 0x1000; @@ -411,7 +359,6 @@ static void pci_read_bridge_io(struct pci_bus *child) io_granularity = 0x400; } - res = child->resource[0]; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & io_mask) << 8; @@ -431,19 +378,18 @@ static void pci_read_bridge_io(struct pci_bus *child) region.start = base; region.end = limit + io_granularity - 1; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } -static void pci_read_bridge_mmio(struct pci_bus *child) +static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res, + bool log) { - struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; struct pci_bus_region region; - struct resource *res; - res = child->resource[1]; pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; @@ -453,20 +399,19 @@ static void pci_read_bridge_mmio(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } -static void pci_read_bridge_mmio_pref(struct pci_bus *child) +static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res, + bool log) { - struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; u64 base64, limit64; pci_bus_addr_t base, limit; struct pci_bus_region region; - struct resource *res; - res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; @@ -506,10 +451,77 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } +static void pci_read_bridge_windows(struct pci_dev *bridge) +{ + u32 buses; + u16 io; + u32 pmem, tmp; + struct resource res; + + pci_read_config_dword(bridge, 
PCI_PRIMARY_BUS, &buses); + res.flags = IORESOURCE_BUS; + res.start = (buses >> 8) & 0xff; + res.end = (buses >> 16) & 0xff; + pci_info(bridge, "PCI bridge to %pR%s\n", &res, + bridge->transparent ? " (subtractive decode)" : ""); + + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) { + bridge->io_window = 1; + pci_read_bridge_io(bridge, &res, true); + } + + pci_read_bridge_mmio(bridge, &res, true); + + /* + * DECchip 21050 pass 2 errata: the bridge may miss an address + * disconnect boundary by one PCI data phase. Workaround: do not + * use prefetching on this device. + */ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (!pmem) + return; + + bridge->pref_window = 1; + + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + + /* + * Bridge claims to have a 64-bit prefetchable memory + * window; verify that the upper bits are actually + * writable. + */ + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); + if (tmp) + bridge->pref_64_window = 1; + } + + pci_read_bridge_mmio_pref(bridge, &res, true); +} + void pci_read_bridge_bases(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -527,9 +539,9 @@ void pci_read_bridge_bases(struct pci_bus *child) for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; - pci_read_bridge_io(child); - pci_read_bridge_mmio(child); - pci_read_bridge_mmio_pref(child); + pci_read_bridge_io(child->self, child->resource[0], false); + pci_read_bridge_mmio(child->self, child->resource[1], false); + pci_read_bridge_mmio_pref(child->self, child->resource[2], false); if (dev->transparent) { pci_bus_for_each_resource(child->parent, res) { @@ -1817,6 +1829,43 @@ static void early_dump_pci_device(struct pci_dev *pdev) value, 256, false); } +static const char *pci_type_str(struct pci_dev *dev) +{ + static const char * const str[] = { + "PCIe Endpoint", + "PCIe Legacy Endpoint", + "PCIe unknown", + "PCIe unknown", + "PCIe Root Port", + "PCIe Switch Upstream Port", + "PCIe Switch Downstream Port", + "PCIe to PCI/PCI-X bridge", + "PCI/PCI-X to PCIe bridge", + "PCIe Root Complex Integrated Endpoint", + "PCIe Root Complex Event Collector", + }; + int type; + + if (pci_is_pcie(dev)) { + type = pci_pcie_type(dev); + if (type < ARRAY_SIZE(str)) + return str[type]; + + return "PCIe unknown"; + } + + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + return "conventional PCI endpoint"; + case PCI_HEADER_TYPE_BRIDGE: + return "conventional PCI bridge"; + case PCI_HEADER_TYPE_CARDBUS: + return "CardBus bridge"; + default: + return "conventional PCI"; + } +} + /** * pci_setup_device - Fill in class and map information of a device * @dev: the device structure to fill @@ -1887,8 +1936,9 @@ int pci_setup_device(struct pci_dev *dev) pci_set_removable(dev); - pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", - 
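pci_read_bridge_windows() proves the upper half of a claimed 64-bit prefetchable window is real with the usual save, write all-ones, read back, restore sequence: any bit that sticks is writable. The same probe as a hedged standalone helper (reg_is_writable() is an invented name; only the config accessors are real kernel API):

#include <linux/pci.h>

static bool reg_is_writable(struct pci_dev *dev, int pos)
{
	u32 saved, probe;

	pci_read_config_dword(dev, pos, &saved);	/* save current value */
	pci_write_config_dword(dev, pos, 0xffffffff);	/* try to set every bit */
	pci_read_config_dword(dev, pos, &probe);	/* see what stuck */
	pci_write_config_dword(dev, pos, saved);	/* restore */

	return probe != 0;	/* any surviving bit means writable */
}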
dev->vendor, dev->device, dev->hdr_type, dev->class); + pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n", + dev->vendor, dev->device, dev->hdr_type, dev->class, + pci_type_str(dev)); /* Device class may be changed after fixup */ class = dev->class >> 8; @@ -1929,14 +1979,14 @@ int pci_setup_device(struct pci_dev *dev) res = &dev->resource[0]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); - pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n", + pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n", res); region.start = 0x3F6; region.end = 0x3F6; res = &dev->resource[1]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); - pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n", + pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n", res); } if ((progif & 4) == 0) { @@ -1945,14 +1995,14 @@ int pci_setup_device(struct pci_dev *dev) res = &dev->resource[2]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); - pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n", + pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n", res); region.start = 0x376; region.end = 0x376; res = &dev->resource[3]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); - pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n", + pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n", res); } } @@ -2159,67 +2209,6 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev) } } -static void pci_configure_ltr(struct pci_dev *dev) -{ -#ifdef CONFIG_PCIEASPM - struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); - struct pci_dev *bridge; - u32 cap, ctl; - - if (!pci_is_pcie(dev)) - return; - - /* Read L1 PM substate capabilities */ - dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS); - - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); - if (!(cap & PCI_EXP_DEVCAP2_LTR)) - return; - - pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl); - if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { - dev->ltr_path = 1; - return; - } - - bridge = pci_upstream_bridge(dev); - if (bridge && bridge->ltr_path) - dev->ltr_path = 1; - - return; - } - - if (!host->native_ltr) - return; - - /* - * Software must not enable LTR in an Endpoint unless the Root - * Complex and all intermediate Switches indicate support for LTR. - * PCIe r4.0, sec 6.18. - */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { - pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_LTR_EN); - dev->ltr_path = 1; - return; - } - - /* - * If we're configuring a hot-added device, LTR was likely - * disabled in the upstream bridge, so re-enable it before enabling - * it in the new device. 
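The reworked IDE messages still pin compatibility-mode channels to the fixed ISA-era ports the spec reserves for them. For reference, the four regions the quirk stuffs into resources 0-3 (userspace table; labels are mine):

#include <stdio.h>

/* Compatibility-mode IDE decodes fixed ISA ranges regardless of its BARs;
 * these are the regions the legacy-IDE quirk forces into resources 0-3. */
static const struct { unsigned start, end; const char *what; } legacy_ide[] = {
	{ 0x1f0, 0x1f7, "primary command block (BAR 0)" },
	{ 0x3f6, 0x3f6, "primary control port (BAR 1)" },
	{ 0x170, 0x177, "secondary command block (BAR 2)" },
	{ 0x376, 0x376, "secondary control port (BAR 3)" },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(legacy_ide) / sizeof(legacy_ide[0]); i++)
		printf("[io 0x%04x-0x%04x] %s\n", legacy_ide[i].start,
		       legacy_ide[i].end, legacy_ide[i].what);
	return 0;
}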
- */ - bridge = pci_upstream_bridge(dev); - if (bridge && bridge->ltr_path) { - pci_bridge_reconfigure_ltr(dev); - pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_LTR_EN); - dev->ltr_path = 1; - } -#endif -} - static void pci_configure_eetlp_prefix(struct pci_dev *dev) { #ifdef CONFIG_PCI_PASID @@ -2270,6 +2259,7 @@ static void pci_configure_device(struct pci_dev *dev) pci_configure_extended_tags(dev, NULL); pci_configure_relaxed_ordering(dev); pci_configure_ltr(dev); + pci_configure_aspm_l1ss(dev); pci_configure_eetlp_prefix(dev); pci_configure_serr(dev); @@ -2307,6 +2297,10 @@ static void pci_release_dev(struct device *dev) kfree(pci_dev); } +static const struct device_type pci_dev_type = { + .groups = pci_dev_attr_groups, +}; + struct pci_dev *pci_alloc_dev(struct pci_bus *bus) { struct pci_dev *dev; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ea476252280a..eff7f5df08e2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -570,13 +570,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; + const char *r_name = pci_resource_name(dev, i); if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { r->end = PAGE_SIZE - 1; r->start = 0; r->flags |= IORESOURCE_UNSET; - pci_info(dev, "expanded BAR %d to page size: %pR\n", - i, r); + pci_info(dev, "%s %pR: expanded to page size\n", + r_name, r); } } } @@ -605,6 +606,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size, u32 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + pos; + const char *res_name = pci_resource_name(dev, pos); pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region); @@ -622,8 +624,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size, bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); - pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", - name, PCI_BASE_ADDRESS_0 + (pos << 2), res); + pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name); } /* @@ -670,6 +671,12 @@ static void quirk_io_region(struct pci_dev *dev, int port, bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); + /* + * "res" is typically a bridge window resource that's not being + * used for a bridge window, so it's just a place to stash this + * non-standard resource. Printing "nr" or pci_resource_name() of + * it doesn't really make sense. 
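quirk_extend_bar_to_page(), which gained the r_name above, grows sub-page memory BARs to a full page and marks them unset so the core reassigns them page-aligned. The resource arithmetic, reduced to a standalone sketch (the struct is invented; the flag value is borrowed for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define IORESOURCE_UNSET	0x20000000u	/* value borrowed for illustration */

struct res { unsigned long start, end, flags; };

/* Mimic the quirk: a BAR smaller than a page is grown to PAGE_SIZE and
 * flagged unset, forcing the core to find it a fresh, page-aligned home. */
static void extend_to_page(struct res *r)
{
	unsigned long size = r->end - r->start + 1;

	if (size < PAGE_SIZE) {
		r->end = PAGE_SIZE - 1;
		r->start = 0;
		r->flags |= IORESOURCE_UNSET;
	}
}

int main(void)
{
	struct res r = { 0xf0000000, 0xf00000ff, 0 };	/* a 256-byte BAR */

	extend_to_page(&r);
	printf("[0x%lx-0x%lx] flags %#lx\n", r.start, r.end, r.flags);
	return 0;
}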
+ */ if (!pci_claim_resource(dev, nr)) pci_info(dev, "quirk: %pR claimed by %s\n", res, name); } @@ -702,10 +709,13 @@ static void quirk_amd_dwc_class(struct pci_dev *pdev) { u32 class = pdev->class; - /* Use "USB Device (not host controller)" class */ - pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; - pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", - class, pdev->class); + if (class != PCI_CLASS_SERIAL_USB_DEVICE) { + /* Use "USB Device (not host controller)" class */ + pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; + pci_info(pdev, + "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", + class, pdev->class); + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, quirk_amd_dwc_class); @@ -3787,6 +3797,19 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); /* + * Spectrum-{1,2,3,4} devices report that a D3hot->D0 transition causes a reset + * (i.e., they advertise NoSoftRst-). However, this transition does not have + * any effect on the device: It continues to be operational and network ports + * remain up. Advertising this support makes it seem as if a PM reset is viable + * for these devices. Mark it as unavailable to skip it when testing reset + * methods. + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcb84, quirk_no_pm_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf6c, quirk_no_pm_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf70, quirk_no_pm_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset); + +/* * Thunderbolt controllers with broken MSI hotplug signaling: * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge). @@ -4699,17 +4722,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) * But the implementation could block peer-to-peer transactions between them * and provide ACS-like functionality. */ -static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) +static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) { if (!pci_is_pcie(dev) || ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) return -ENOTTY; + /* + * Future Zhaoxin Root Ports and Switch Downstream Ports will + * implement ACS capability in accordance with the PCIe Spec. + */ switch (dev->device) { case 0x0710 ... 0x071e: case 0x0721: - case 0x0723 ... 0x0732: + case 0x0723 ... 
0x0752: return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } @@ -5500,6 +5527,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev) pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); } +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags); @@ -6198,6 +6226,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size); #endif /* diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index fd74f1c99dba..909e6a7c3cc3 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, struct list_head *head) { struct resource *res; + const char *res_name; struct pci_dev_resource *add_res, *tmp; struct pci_dev_resource *dev_res; resource_size_t add_size, align; @@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, bool found_match = false; res = add_res->res; + /* Skip resource that has been reset */ if (!res->flags) goto out; @@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, continue; idx = res - &add_res->dev->resource[0]; + res_name = pci_resource_name(add_res->dev, idx); add_size = add_res->add_size; align = add_res->min_align; if (!resource_size(res)) { @@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head, (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) - pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", - (unsigned long long) add_size, idx, - res); + pci_info(add_res->dev, "%s %pR: failed to add %llx\n", + res_name, res, + (unsigned long long) add_size); } out: list_del(&add_res->list); @@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus); static void pci_setup_bridge_io(struct pci_dev *bridge) { struct resource *res; + const char *res_name; struct pci_bus_region region; unsigned long io_mask; u8 io_base_lo, io_limit_lo; @@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) /* Set up the top and bottom of the PCI I/O segment for this bus */ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW); pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { pci_read_config_word(bridge, PCI_IO_BASE, &l); @@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) l = ((u16) io_limit_lo << 8) | io_base_lo; /* Set up upper 16 bits of I/O base/limit */ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { /* Clear upper 16 bits of I/O base/limit */ io_upper16 = 0; @@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) static void pci_setup_bridge_mmio(struct pci_dev *bridge) { struct resource *res; + const char 
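pci_setup_bridge_io() is the write-side mirror of the bridge-window decode earlier: bits 15:12 of each edge share one 16-bit register, and both upper halves travel in a single dword to the UPPER16 pair. The packing alone, with an invented window:

#include <stdio.h>

/* Pack a bridge I/O window into its config registers the way
 * pci_setup_bridge_io() does (window address invented for the demo). */
int main(void)
{
	unsigned long start = 0x12000, end = 0x13fff;
	unsigned short l;
	unsigned int io_upper16;

	/* PCI_IO_BASE/PCI_IO_LIMIT: bits 15:12 of each edge, side by side */
	l = ((end >> 8) & 0xf0) << 8 | ((start >> 8) & 0xf0);
	/* PCI_IO_BASE_UPPER16/PCI_IO_LIMIT_UPPER16 in one dword write */
	io_upper16 = (end & 0xffff0000) | (start >> 16);

	printf("l=%#06x io_upper16=%#010x\n", l, io_upper16);
	return 0;
}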
*res_name; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus */ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW); pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { l = 0x0000fff0; } @@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge) static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) { struct resource *res; + const char *res_name; struct pci_bus_region region; u32 l, bu, lu; @@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) /* Set up PREF base/limit */ bu = lu = 0; res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW); pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; @@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); } - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { l = 0x0000fff0; } @@ -1013,6 +1022,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, int i; pci_dev_for_each_resource(dev, r, i) { + const char *r_name = pci_resource_name(dev, i); resource_size_t r_size; if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) || @@ -1043,8 +1053,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (order < 0) order = 0; if (order >= ARRAY_SIZE(aligns)) { - pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n", - i, r, (unsigned long long) align); + pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n", + r_name, r, (unsigned long long) align); r->flags = 0; continue; } @@ -2235,6 +2245,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END; i++) { struct resource *res = &bridge->resource[i]; + const char *res_name = pci_resource_name(bridge, i); if ((res->flags ^ type) & PCI_RES_TYPE_MASK) continue; @@ -2247,8 +2258,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) if (ret) goto cleanup; - pci_info(bridge, "BAR %d: releasing %pR\n", - i, res); + pci_info(bridge, "%s %pR: releasing\n", res_name, res); if (res->parent) release_resource(res); diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c deleted file mode 100644 index cc7d26b015f3..000000000000 --- a/drivers/pci/setup-irq.c +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support routines for initializing a PCI subsystem - * - * Extruded from code written by - * Dave Rusling (david.rusling@reo.mts.dec.com) - * David Mosberger (davidm@cs.arizona.edu) - * David Miller (davem@redhat.com) - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/cache.h> -#include "pci.h" - -void pci_assign_irq(struct pci_dev *dev) -{ - u8 pin; - u8 slot = -1; - int irq = 0; - struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); - - if (!(hbrg->map_irq)) { - pci_dbg(dev, "runtime IRQ mapping not provided by arch\n"); - return; - } - - /* - * If this device is not on the primary bus, we need to figure out 
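pci_setup_bridge_mmio_pref() above splits a 64-bit window across four registers: bits 31:20 of each edge go in PCI_PREF_MEMORY_BASE/LIMIT, and the high halves in the UPPER32 pair. The split by itself (window address invented):

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x38fff00000ULL, end = 0x38ffffffffULL;
	unsigned int l, bu, lu;

	/* PCI_PREF_MEMORY_BASE/LIMIT: bits 31:20 of each 32-bit half */
	l = ((start >> 16) & 0xfff0) | ((unsigned int)end & 0xfff00000);
	/* PCI_PREF_BASE_UPPER32 / PCI_PREF_LIMIT_UPPER32 */
	bu = start >> 32;
	lu = end >> 32;

	printf("l=%#010x bu=%#010x lu=%#010x\n", l, bu, lu);
	return 0;
}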
- * which interrupt pin it will come in on. We know which slot it - * will come in on because that slot is where the bridge is. Each - * time the interrupt line passes through a PCI-PCI bridge we must - * apply the swizzle function. - */ - pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); - /* Cope with illegal. */ - if (pin > 4) - pin = 1; - - if (pin) { - /* Follow the chain of bridges, swizzling as we go. */ - if (hbrg->swizzle_irq) - slot = (*(hbrg->swizzle_irq))(dev, &pin); - - /* - * If a swizzling function is not used, map_irq() must - * ignore slot. - */ - irq = (*(hbrg->map_irq))(dev, slot, pin); - if (irq == -1) - irq = 0; - } - dev->irq = irq; - - pci_dbg(dev, "assign IRQ: got %d\n", dev->irq); - - /* - * Always tell the device, so the driver knows what is the real IRQ - * to use; the device does not use it. - */ - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); -} diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index ceaa69491f5e..c6d933ddfd46 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) u32 new, check, mask; int reg; struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ if (dev->is_virtfn) @@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { - pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n", - resno, new, check); + pci_err(dev, "%s: error updating (%#010x != %#010x)\n", + res_name, new, check); } if (res->flags & IORESOURCE_MEM_64) { @@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { - pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n", - resno, new, check); + pci_err(dev, "%s: error updating (high %#010x != %#010x)\n", + res_name, new, check); } } @@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno) int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; + const char *res_name = pci_resource_name(dev, resource); struct resource *root, *conflict; if (res->flags & IORESOURCE_UNSET) { - pci_info(dev, "can't claim BAR %d %pR: no address assigned\n", - resource, res); + pci_info(dev, "%s %pR: can't claim; no address assigned\n", + res_name, res); return -EINVAL; } @@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) root = pci_find_parent_resource(dev, res); if (!root) { - pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n", - resource, res); + pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n", + res_name, res); res->flags |= IORESOURCE_UNSET; return -EINVAL; } conflict = request_resource_conflict(root, res); if (conflict) { - pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n", - resource, res, conflict->name, conflict); + pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n", + res_name, res, conflict->name, conflict); res->flags |= IORESOURCE_UNSET; return -EBUSY; } @@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, { struct resource *root, *conflict; resource_size_t fw_addr, start, end; + const char *res_name = pci_resource_name(dev, resno); fw_addr = 
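The deleted pci_assign_irq() swizzles the pin once per bridge hop before handing it to map_irq(). The conventional rotation, which matches pci_swizzle_interrupt_pin() in the core, is small enough to restate standalone:

#include <stdio.h>

/* The standard INTx swizzle: each PCI-PCI bridge hop rotates the pin by
 * the device number, so pin A on slot 1 arrives upstream as pin B, and
 * so on. Pins are numbered 1-4 for INTA-INTD. */
static unsigned char swizzle_pin(unsigned char pin, unsigned char slot)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTA (1) on device 2 behind one bridge arrives upstream as INTC (3) */
	printf("pin %u\n", swizzle_pin(1, 2));
	return 0;
}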
pcibios_retrieve_fw_addr(dev, resno); if (!fw_addr) @@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, root = &iomem_resource; } - pci_info(dev, "BAR %d: trying firmware assignment %pR\n", - resno, res); + pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res); conflict = request_resource_conflict(root, res); if (conflict) { - pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n", - resno, res, conflict->name, conflict); + pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res, + conflict->name, conflict); res->start = start; res->end = end; res->flags |= IORESOURCE_UNSET; @@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno, int pci_assign_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); resource_size_t align, size; int ret; @@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno) res->flags |= IORESOURCE_UNSET; align = pci_resource_alignment(dev, res); if (!align) { - pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n", - resno, res); + pci_info(dev, "%s %pR: can't assign; bogus alignment\n", + res_name, res); return -EINVAL; } @@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno) * working, which is better than just leaving it disabled. */ if (ret < 0) { - pci_info(dev, "BAR %d: no space for %pR\n", resno, res); + pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res); ret = pci_revert_fw_address(res, dev, resno, size); } if (ret < 0) { - pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res); + pci_info(dev, "%s %pR: failed to assign\n", res_name, res); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; - pci_info(dev, "BAR %d: assigned %pR\n", resno, res); + pci_info(dev, "%s %pR: assigned\n", res_name, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); @@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } EXPORT_SYMBOL(pci_assign_resource); -int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, - resource_size_t min_align) +int pci_reassign_resource(struct pci_dev *dev, int resno, + resource_size_t addsize, resource_size_t min_align) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); unsigned long flags; resource_size_t new_size; int ret; @@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz flags = res->flags; res->flags |= IORESOURCE_UNSET; if (!res->parent) { - pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n", - resno, res); + pci_info(dev, "%s %pR: can't reassign; unassigned resource\n", + res_name, res); return -EINVAL; } @@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz ret = _pci_assign_resource(dev, resno, new_size, min_align); if (ret) { res->flags = flags; - pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n", - resno, res, (unsigned long long) addsize); + pci_info(dev, "%s %pR: failed to expand by %#llx\n", + res_name, res, (unsigned long long) addsize); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; - pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n", - resno, res, (unsigned long long) addsize); + pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n", + res_name, res, (unsigned long 
long) addsize); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); @@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz void pci_release_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); - pci_info(dev, "BAR %d: releasing %pR\n", resno, res); + pci_info(dev, "%s %pR: releasing\n", res_name, res); if (!res->parent) return; @@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask) u16 cmd, old_cmd; int i; struct resource *r; + const char *r_name; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; @@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask) if (!(mask & (1 << i))) continue; + r_name = pci_resource_name(dev, i); + if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; if ((i == PCI_ROM_RESOURCE) && @@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask) continue; if (r->flags & IORESOURCE_UNSET) { - pci_err(dev, "can't enable device: BAR %d %pR not assigned\n", - i, r); + pci_err(dev, "%s %pR: not assigned; can't enable device\n", + r_name, r); return -EINVAL; } if (!r->parent) { - pci_err(dev, "can't enable device: BAR %d %pR not claimed\n", - i, r); + pci_err(dev, "%s %pR: not claimed; can't enable device\n", + r_name, r); return -EINVAL; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 5b921387eca6..5a4adf6c04cf 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1308,13 +1308,6 @@ static void stdev_release(struct device *dev) { struct switchtec_dev *stdev = to_stdev(dev); - if (stdev->dma_mrpc) { - iowrite32(0, &stdev->mmio_mrpc->dma_en); - flush_wc_buf(stdev); - writeq(0, &stdev->mmio_mrpc->dma_addr); - dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc), - stdev->dma_mrpc, stdev->dma_mrpc_dma_addr); - } kfree(stdev); } @@ -1358,7 +1351,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) return ERR_PTR(-ENOMEM); stdev->alive = true; - stdev->pdev = pdev; + stdev->pdev = pci_dev_get(pdev); INIT_LIST_HEAD(&stdev->mrpc_queue); mutex_init(&stdev->mrpc_mutex); stdev->mrpc_busy = 0; @@ -1391,6 +1384,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) return stdev; err_put: + pci_dev_put(stdev->pdev); put_device(&stdev->dev); return ERR_PTR(rc); } @@ -1644,6 +1638,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, return 0; } +static void switchtec_exit_pci(struct switchtec_dev *stdev) +{ + if (stdev->dma_mrpc) { + iowrite32(0, &stdev->mmio_mrpc->dma_en); + flush_wc_buf(stdev); + writeq(0, &stdev->mmio_mrpc->dma_addr); + dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc), + stdev->dma_mrpc, stdev->dma_mrpc_dma_addr); + stdev->dma_mrpc = NULL; + } +} + static int switchtec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1666,7 +1672,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, rc = switchtec_init_isr(stdev); if (rc) { dev_err(&stdev->dev, "failed to init isr.\n"); - goto err_put; + goto err_exit_pci; } iowrite32(SWITCHTEC_EVENT_CLEAR | @@ -1687,6 +1693,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev, err_devadd: stdev_kill(stdev); +err_exit_pci: + switchtec_exit_pci(stdev); err_put: ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt)); put_device(&stdev->dev); @@ -1703,6 +1711,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev) ida_free(&switchtec_minor_ida, 
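The new err_exit_pci label keeps switchtec_pci_probe() unwinding in exact reverse order of setup, and switchtec_pci_remove() now repeats the same sequence. The shape of that discipline, with invented stand-ins for the switchtec steps:

#include <stdio.h>

static int step_a(void) { return 0; }	/* stands in for stdev_create() */
static int step_b(void) { return 0; }	/* stands in for switchtec_init_pci() */
static int step_c(void) { return -1; }	/* stands in for a failing switchtec_init_isr() */
static void undo_b(void) { puts("undo b (cf. switchtec_exit_pci)"); }
static void undo_a(void) { puts("undo a (cf. put_device)"); }

/* Unwind in exact reverse order of acquisition, one label per step */
static int example_probe(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;

	rc = step_b();
	if (rc)
		goto err_undo_a;

	rc = step_c();
	if (rc)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return rc;
}

int main(void)
{
	printf("probe returned %d\n", example_probe());
	return 0;
}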
MINOR(stdev->dev.devt)); dev_info(&stdev->dev, "unregistered.\n"); stdev_kill(stdev); + switchtec_exit_pci(stdev); + pci_dev_put(stdev->pdev); + stdev->pdev = NULL; put_device(&stdev->dev); }
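stdev_create() now pins the pci_dev it caches with pci_dev_get(), and remove drops that reference only after teardown. A hedged kernel-style sketch of the ownership rule (struct holder and both function names are invented):

#include <linux/pci.h>
#include <linux/slab.h>

/* Any structure that stashes a pci_dev pointer beyond the probe call must
 * hold a reference, or the pci_dev can vanish while the pointer is live. */
struct holder {
	struct pci_dev *pdev;
};

static struct holder *holder_create(struct pci_dev *pdev)
{
	struct holder *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;
	h->pdev = pci_dev_get(pdev);	/* pin the device */
	return h;
}

static void holder_destroy(struct holder *h)
{
	pci_dev_put(h->pdev);		/* balance pci_dev_get() */
	h->pdev = NULL;
	kfree(h);
}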