Diffstat (limited to 'drivers/pci/controller/cadence')
-rw-r--r--   drivers/pci/controller/cadence/Kconfig                45
-rw-r--r--   drivers/pci/controller/cadence/Makefile                5
-rw-r--r--   drivers/pci/controller/cadence/pcie-cadence-ep.c     479
-rw-r--r--   drivers/pci/controller/cadence/pcie-cadence-host.c   281
-rw-r--r--   drivers/pci/controller/cadence/pcie-cadence-plat.c   174
-rw-r--r--   drivers/pci/controller/cadence/pcie-cadence.c        253
-rw-r--r--   drivers/pci/controller/cadence/pcie-cadence.h        399
7 files changed, 1636 insertions, 0 deletions
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644
index 000000000000..b76b3cf55ce5
--- /dev/null
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence PCIe controllers support"
+	depends on PCI
+
+config PCIE_CADENCE
+	bool
+
+config PCIE_CADENCE_HOST
+	bool
+	depends on OF
+	select IRQ_DOMAIN
+	select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+	bool
+	depends on OF
+	depends on PCI_ENDPOINT
+	select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+	bool
+
+config PCIE_CADENCE_PLAT_HOST
+	bool "Cadence PCIe platform host controller"
+	depends on OF
+	select PCIE_CADENCE_HOST
+	select PCIE_CADENCE_PLAT
+	help
+	  Say Y here if you want to support the Cadence PCIe platform
+	  controller in host mode. This PCIe controller may be embedded into
+	  many different vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+	bool "Cadence PCIe platform endpoint controller"
+	depends on OF
+	depends on PCI_ENDPOINT
+	select PCIE_CADENCE_EP
+	select PCIE_CADENCE_PLAT
+	help
+	  Say Y here if you want to support the Cadence PCIe platform
+	  controller in endpoint mode. This PCIe controller may be embedded
+	  into many different vendors' SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644
index 000000000000..232a3f20876a
--- /dev/null
+++ b/drivers/pci/controller/cadence/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
new file mode 100644
index 000000000000..1c173dad67d1
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe endpoint controller driver.
+// Author: Cyrille Pitchen <[email protected]>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "pcie-cadence.h"
+
+#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3
+
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+				     struct pci_epf_header *hdr)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
+			       hdr->subclass_code | hdr->baseclass_code << 8);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
+			       hdr->cache_line_size);
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
+
+	/*
+	 * Vendor ID can only be modified from function 0, all other functions
+	 * use the same vendor ID as function 0.
+	 */
+	if (fn == 0) {
+		/* Update the vendor IDs. */
+		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
+			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
+
+		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+	}
+
+	return 0;
+}
+
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+				struct pci_epf_bar *epf_bar)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	dma_addr_t bar_phys = epf_bar->phys_addr;
+	enum pci_barno bar = epf_bar->barno;
+	int flags = epf_bar->flags;
+	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+	u64 sz;
+
+	/* BAR size is 2^(aperture + 7) */
+	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64bit values.
+	 */
+	sz = 1ULL << fls64(sz - 1);
+	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
+	} else {
+		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+		bool is_64bits = sz > SZ_2G;
+
+		if (is_64bits && (bar & 1))
+			return -EINVAL;
+
+		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+		if (is_64bits && is_prefetch)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+		else if (is_prefetch)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+		else if (is_64bits)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
+		else
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
+	}
+
+	addr0 = lower_32_bits(bar_phys);
+	addr1 = upper_32_bits(bar_phys);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
+			 addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
+			 addr1);
+
+	if (bar < BAR_4) {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	cfg = cdns_pcie_readl(pcie, reg);
+	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+	cdns_pcie_writel(pcie, reg, cfg);
+
+	return 0;
+}
+
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+				   struct pci_epf_bar *epf_bar)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	enum pci_barno bar = epf_bar->barno;
+	u32 reg, cfg, b, ctrl;
+
+	if (bar < BAR_4) {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+	cfg = cdns_pcie_readl(pcie, reg);
+	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+	cdns_pcie_writel(pcie, reg, cfg);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+}
+
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
+				 u64 pci_addr, size_t size)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 r;
+
+	/* ob_region_map is a single unsigned long, i.e. BITS_PER_LONG bits. */
+	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
+	if (r >= ep->max_regions - 1) {
+		dev_err(&epc->dev, "no free outbound region\n");
+		return -EINVAL;
+	}
+
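+	/*
+	 * Region 0 is reserved for IRQ writes by cdns_pcie_ep_setup(), so
+	 * data mappings always use regions 1 to max_regions - 1.
+	 */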
cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); + +	set_bit(r, &ep->ob_region_map); +	ep->ob_addr[r] = addr; + +	return 0; +} + +static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, +				    phys_addr_t addr) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	u32 r; + +	for (r = 0; r < ep->max_regions - 1; r++) +		if (ep->ob_addr[r] == addr) +			break; + +	if (r == ep->max_regions - 1) +		return; + +	cdns_pcie_reset_outbound_region(pcie, r); + +	ep->ob_addr[r] = 0; +	clear_bit(r, &ep->ob_region_map); +} + +static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; +	u16 flags; + +	/* +	 * Set the Multiple Message Capable bitfield into the Message Control +	 * register. +	 */ +	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); +	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1); +	flags |= PCI_MSI_FLAGS_64BIT; +	flags &= ~PCI_MSI_FLAGS_MASKBIT; +	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); + +	return 0; +} + +static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; +	u16 flags, mme; + +	/* Validate that the MSI feature is actually enabled. */ +	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); +	if (!(flags & PCI_MSI_FLAGS_ENABLE)) +		return -EINVAL; + +	/* +	 * Get the Multiple Message Enable bitfield from the Message Control +	 * register. +	 */ +	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + +	return mme; +} + +static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, +				     u8 intx, bool is_asserted) +{ +	struct cdns_pcie *pcie = &ep->pcie; +	u32 offset; +	u16 status; +	u8 msg_code; + +	intx &= 3; + +	/* Set the outbound region if needed. */ +	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || +		     ep->irq_pci_fn != fn)) { +		/* First region was reserved for IRQ writes. 
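A dummy zero write into that region, at an offset encoding the message routing and code (computed below), makes the controller emit the corresponding INTx message TLP.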
*/ +		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, +							     ep->irq_phys_addr); +		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; +		ep->irq_pci_fn = fn; +	} + +	if (is_asserted) { +		ep->irq_pending |= BIT(intx); +		msg_code = MSG_CODE_ASSERT_INTA + intx; +	} else { +		ep->irq_pending &= ~BIT(intx); +		msg_code = MSG_CODE_DEASSERT_INTA + intx; +	} + +	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); +	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { +		status ^= PCI_STATUS_INTERRUPT; +		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); +	} + +	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | +		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | +		 CDNS_PCIE_MSG_NO_DATA; +	writel(0, ep->irq_cpu_addr + offset); +} + +static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx) +{ +	u16 cmd; + +	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); +	if (cmd & PCI_COMMAND_INTX_DISABLE) +		return -EINVAL; + +	cdns_pcie_ep_assert_intx(ep, fn, intx, true); +	/* +	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() +	 * from drivers/pci/dwc/pci-dra7xx.c +	 */ +	mdelay(1); +	cdns_pcie_ep_assert_intx(ep, fn, intx, false); +	return 0; +} + +static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, +				     u8 interrupt_num) +{ +	struct cdns_pcie *pcie = &ep->pcie; +	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; +	u16 flags, mme, data, data_mask; +	u8 msi_count; +	u64 pci_addr, pci_addr_mask = 0xff; + +	/* Check whether the MSI feature has been enabled by the PCI host. */ +	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); +	if (!(flags & PCI_MSI_FLAGS_ENABLE)) +		return -EINVAL; + +	/* Get the number of enabled MSIs */ +	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; +	msi_count = 1 << mme; +	if (!interrupt_num || interrupt_num > msi_count) +		return -EINVAL; + +	/* Compute the data value to be written. */ +	data_mask = msi_count - 1; +	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); +	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); + +	/* Get the PCI address where to write the data into. */ +	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); +	pci_addr <<= 32; +	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); +	pci_addr &= GENMASK_ULL(63, 2); + +	/* Set the outbound region if needed. */ +	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || +		     ep->irq_pci_fn != fn)) { +		/* First region was reserved for IRQ writes. 
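Only a 256-byte window (pci_addr_mask + 1) is remapped, so the low address bits select the offset used for the data write below.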
*/ +		cdns_pcie_set_outbound_region(pcie, fn, 0, +					      false, +					      ep->irq_phys_addr, +					      pci_addr & ~pci_addr_mask, +					      pci_addr_mask + 1); +		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); +		ep->irq_pci_fn = fn; +	} +	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + +	return 0; +} + +static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, +				  enum pci_epc_irq_type type, +				  u16 interrupt_num) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + +	switch (type) { +	case PCI_EPC_IRQ_LEGACY: +		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0); + +	case PCI_EPC_IRQ_MSI: +		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + +	default: +		break; +	} + +	return -EINVAL; +} + +static int cdns_pcie_ep_start(struct pci_epc *epc) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	struct pci_epf *epf; +	u32 cfg; + +	/* +	 * BIT(0) is hardwired to 1, hence function 0 is always enabled +	 * and can't be disabled anyway. +	 */ +	cfg = BIT(0); +	list_for_each_entry(epf, &epc->pci_epf, list) +		cfg |= BIT(epf->func_no); +	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); + +	return 0; +} + +static const struct pci_epc_features cdns_pcie_epc_features = { +	.linkup_notifier = false, +	.msi_capable = true, +	.msix_capable = false, +}; + +static const struct pci_epc_features* +cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) +{ +	return &cdns_pcie_epc_features; +} + +static const struct pci_epc_ops cdns_pcie_epc_ops = { +	.write_header	= cdns_pcie_ep_write_header, +	.set_bar	= cdns_pcie_ep_set_bar, +	.clear_bar	= cdns_pcie_ep_clear_bar, +	.map_addr	= cdns_pcie_ep_map_addr, +	.unmap_addr	= cdns_pcie_ep_unmap_addr, +	.set_msi	= cdns_pcie_ep_set_msi, +	.get_msi	= cdns_pcie_ep_get_msi, +	.raise_irq	= cdns_pcie_ep_raise_irq, +	.start		= cdns_pcie_ep_start, +	.get_features	= cdns_pcie_ep_get_features, +}; + + +int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) +{ +	struct device *dev = ep->pcie.dev; +	struct platform_device *pdev = to_platform_device(dev); +	struct device_node *np = dev->of_node; +	struct cdns_pcie *pcie = &ep->pcie; +	struct resource *res; +	struct pci_epc *epc; +	int ret; + +	pcie->is_rc = false; + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); +	pcie->reg_base = devm_ioremap_resource(dev, res); +	if (IS_ERR(pcie->reg_base)) { +		dev_err(dev, "missing \"reg\"\n"); +		return PTR_ERR(pcie->reg_base); +	} + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); +	if (!res) { +		dev_err(dev, "missing \"mem\"\n"); +		return -EINVAL; +	} +	pcie->mem_res = res; + +	ret = of_property_read_u32(np, "cdns,max-outbound-regions", +				   &ep->max_regions); +	if (ret < 0) { +		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n"); +		return ret; +	} +	ep->ob_addr = devm_kcalloc(dev, +				   ep->max_regions, sizeof(*ep->ob_addr), +				   GFP_KERNEL); +	if (!ep->ob_addr) +		return -ENOMEM; + +	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). 
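cdns_pcie_ep_start() re-enables the other physical functions once endpoint function drivers are bound.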
*/ +	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0)); + +	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); +	if (IS_ERR(epc)) { +		dev_err(dev, "failed to create epc device\n"); +		ret = PTR_ERR(epc); +		goto err_init; +	} + +	epc_set_drvdata(epc, ep); + +	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) +		epc->max_functions = 1; + +	ret = pci_epc_mem_init(epc, pcie->mem_res->start, +			       resource_size(pcie->mem_res)); +	if (ret < 0) { +		dev_err(dev, "failed to initialize the memory space\n"); +		goto err_init; +	} + +	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, +						  SZ_128K); +	if (!ep->irq_cpu_addr) { +		dev_err(dev, "failed to reserve memory space for MSI\n"); +		ret = -ENOMEM; +		goto free_epc_mem; +	} +	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; +	/* Reserve region 0 for IRQs */ +	set_bit(0, &ep->ob_region_map); + +	return 0; + + free_epc_mem: +	pci_epc_mem_exit(epc); + + err_init: +	pm_runtime_put_sync(dev); + +	return ret; +} diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c new file mode 100644 index 000000000000..9b1c3966414b --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe host controller driver. +// Author: Cyrille Pitchen <[email protected]> + +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "pcie-cadence.h" + +static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, +				      int where) +{ +	struct pci_host_bridge *bridge = pci_find_host_bridge(bus); +	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); +	struct cdns_pcie *pcie = &rc->pcie; +	unsigned int busn = bus->number; +	u32 addr0, desc0; + +	if (busn == rc->bus_range->start) { +		/* +		 * Only the root port (devfn == 0) is connected to this bus. +		 * All other PCI devices are behind some bridge hence on another +		 * bus. +		 */ +		if (devfn) +			return NULL; + +		return pcie->reg_base + (where & 0xfff); +	} +	/* Check that the link is up */ +	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1)) +		return NULL; +	/* Clear AXI link-down status */ +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0); + +	/* Update Output registers for AXI region 0. */ +	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | +		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | +		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0); + +	/* Configuration Type 0 or Type 1 access. */ +	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | +		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); +	/* +	 * The bus number was already set once for all in desc1 by +	 * cdns_pcie_host_init_address_translation(). 
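+	 * Only the access type changes per access: Type 0 for the bus
+	 * directly behind the Root Port, Type 1 for buses further down the
+	 * hierarchy.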
+	 */ +	if (busn == rc->bus_range->start + 1) +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; +	else +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0); + +	return rc->cfg_base + (where & 0xfff); +} + +static struct pci_ops cdns_pcie_host_ops = { +	.map_bus	= cdns_pci_map_bus, +	.read		= pci_generic_config_read, +	.write		= pci_generic_config_write, +}; + + +static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) +{ +	struct cdns_pcie *pcie = &rc->pcie; +	u32 value, ctrl; + +	/* +	 * Set the root complex BAR configuration register: +	 * - disable both BAR0 and BAR1. +	 * - enable Prefetchable Memory Base and Limit registers in type 1 +	 *   config space (64 bits). +	 * - enable IO Base and Limit registers in type 1 config +	 *   space (32 bits). +	 */ +	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; +	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | +		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | +		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | +		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | +		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | +		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS; +	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + +	/* Set root port configuration space */ +	if (rc->vendor_id != 0xffff) +		cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); +	if (rc->device_id != 0xffff) +		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); + +	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0); +	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0); +	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); + +	return 0; +} + +static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +{ +	struct cdns_pcie *pcie = &rc->pcie; +	struct resource *mem_res = pcie->mem_res; +	struct resource *bus_range = rc->bus_range; +	struct resource *cfg_res = rc->cfg_res; +	struct device *dev = pcie->dev; +	struct device_node *np = dev->of_node; +	struct of_pci_range_parser parser; +	struct of_pci_range range; +	u32 addr0, addr1, desc1; +	u64 cpu_addr; +	int r, err; + +	/* +	 * Reserve region 0 for PCI configure space accesses: +	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by +	 * cdns_pci_map_bus(), other region registers are set here once for all. +	 */ +	addr1 = 0; /* Should be programmed to zero. */ +	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); + +	cpu_addr = cfg_res->start - mem_res->start; +	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | +		(lower_32_bits(cpu_addr) & GENMASK(31, 8)); +	addr1 = upper_32_bits(cpu_addr); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); + +	err = of_pci_range_parser_init(&parser, np); +	if (err) +		return err; + +	r = 1; +	for_each_of_pci_range(&parser, &range) { +		bool is_io; + +		if (r >= rc->max_regions) +			break; + +		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) +			is_io = false; +		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) +			is_io = true; +		else +			continue; + +		cdns_pcie_set_outbound_region(pcie, 0, r, is_io, +					      range.cpu_addr, +					      range.pci_addr, +					      range.size); +		r++; +	} + +	/* +	 * Set Root Port no BAR match Inbound Translation registers: +	 * needed for MSI and DMA. 
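+	 * Inbound TLPs that match no BAR are all translated through this
+	 * single window.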
+	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their +	 * inbound translation registers. +	 */ +	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); +	addr1 = 0; +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); + +	return 0; +} + +static int cdns_pcie_host_init(struct device *dev, +			       struct list_head *resources, +			       struct cdns_pcie_rc *rc) +{ +	struct resource *bus_range = NULL; +	int err; + +	/* Parse our PCI ranges and request their resources */ +	err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); +	if (err) +		return err; + +	rc->bus_range = bus_range; +	rc->pcie.bus = bus_range->start; + +	err = cdns_pcie_host_init_root_port(rc); +	if (err) +		goto err_out; + +	err = cdns_pcie_host_init_address_translation(rc); +	if (err) +		goto err_out; + +	return 0; + + err_out: +	pci_free_resource_list(resources); +	return err; +} + +int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) +{ +	struct device *dev = rc->pcie.dev; +	struct platform_device *pdev = to_platform_device(dev); +	struct device_node *np = dev->of_node; +	struct pci_host_bridge *bridge; +	struct list_head resources; +	struct cdns_pcie *pcie; +	struct resource *res; +	int ret; + +	bridge = pci_host_bridge_from_priv(rc); +	if (!bridge) +		return -ENOMEM; + +	pcie = &rc->pcie; +	pcie->is_rc = true; + +	rc->max_regions = 32; +	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions); + +	rc->no_bar_nbits = 32; +	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); + +	rc->vendor_id = 0xffff; +	of_property_read_u16(np, "vendor-id", &rc->vendor_id); + +	rc->device_id = 0xffff; +	of_property_read_u16(np, "device-id", &rc->device_id); + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); +	pcie->reg_base = devm_ioremap_resource(dev, res); +	if (IS_ERR(pcie->reg_base)) { +		dev_err(dev, "missing \"reg\"\n"); +		return PTR_ERR(pcie->reg_base); +	} + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); +	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); +	if (IS_ERR(rc->cfg_base)) { +		dev_err(dev, "missing \"cfg\"\n"); +		return PTR_ERR(rc->cfg_base); +	} +	rc->cfg_res = res; + +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); +	if (!res) { +		dev_err(dev, "missing \"mem\"\n"); +		return -EINVAL; +	} + +	pcie->mem_res = res; + +	ret = cdns_pcie_host_init(dev, &resources, rc); +	if (ret) +		goto err_init; + +	list_splice_init(&resources, &bridge->windows); +	bridge->dev.parent = dev; +	bridge->busnr = pcie->bus; +	bridge->ops = &cdns_pcie_host_ops; +	bridge->map_irq = of_irq_parse_and_map_pci; +	bridge->swizzle_irq = pci_common_swizzle; + +	ret = pci_host_probe(bridge); +	if (ret < 0) +		goto err_host_probe; + +	return 0; + + err_host_probe: +	pci_free_resource_list(&resources); + + err_init: +	pm_runtime_put_sync(dev); + +	return ret; +} diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c new file mode 100644 index 000000000000..f5c6bf6dfcb8 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence PCIe platform  driver. 
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <[email protected]>
+ */
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: true if the PCIe controller is in Root Complex mode, false if it
+ *         is in Endpoint mode
+ */
+struct cdns_plat_pcie {
+	struct cdns_pcie        *pcie;
+	bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+	bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+	const struct cdns_plat_pcie_of_data *data;
+	struct cdns_plat_pcie *cdns_plat_pcie;
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	struct pci_host_bridge *bridge;
+	struct cdns_pcie_ep *ep;
+	struct cdns_pcie_rc *rc;
+	int phy_count;
+	bool is_rc;
+	int ret;
+
+	match = of_match_device(cdns_plat_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct cdns_plat_pcie_of_data *)match->data;
+	is_rc = data->is_rc;
+
+	pr_debug("Started %s with is_rc: %d\n", __func__, is_rc);
+	cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+	if (!cdns_plat_pcie)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, cdns_plat_pcie);
+	if (is_rc) {
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+			return -ENODEV;
+
+		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+		if (!bridge)
+			return -ENOMEM;
+
+		rc = pci_host_bridge_priv(bridge);
+		rc->pcie.dev = dev;
+		cdns_plat_pcie->pcie = &rc->pcie;
+		cdns_plat_pcie->is_rc = is_rc;
+
+		ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+		if (ret) {
+			dev_err(dev, "failed to init phy\n");
+			return ret;
+		}
+		pm_runtime_enable(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "pm_runtime_get_sync() failed\n");
+			goto err_get_sync;
+		}
+
+		ret = cdns_pcie_host_setup(rc);
+		if (ret)
+			goto err_init;
+	} else {
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+			return -ENODEV;
+
+		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+		if (!ep)
+			return -ENOMEM;
+
+		ep->pcie.dev = dev;
+		cdns_plat_pcie->pcie = &ep->pcie;
+		cdns_plat_pcie->is_rc = is_rc;
+
+		ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+		if (ret) {
+			dev_err(dev, "failed to init phy\n");
+			return ret;
+		}
+
+		pm_runtime_enable(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "pm_runtime_get_sync() failed\n");
+			goto err_get_sync;
+		}
+
+		ret = cdns_pcie_ep_setup(ep);
+		if (ret)
+			goto err_init;
+	}
+
+	/* Success: do not fall through into the error labels below. */
+	return 0;
+
+ err_init:
+	pm_runtime_put_sync(dev);
+
+ err_get_sync:
+	pm_runtime_disable(dev);
+	cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+	phy_count = cdns_plat_pcie->pcie->phy_count;
+	while (phy_count--)
+		device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+	return ret;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_put_sync(dev);
+	if (ret < 0)
+		dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+	pm_runtime_disable(dev);
+
+	cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+	.is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data
cdns_plat_pcie_ep_of_data = { +	.is_rc = false, +}; + +static const struct of_device_id cdns_plat_pcie_of_match[] = { +	{ +		.compatible = "cdns,cdns-pcie-host", +		.data = &cdns_plat_pcie_host_of_data, +	}, +	{ +		.compatible = "cdns,cdns-pcie-ep", +		.data = &cdns_plat_pcie_ep_of_data, +	}, +	{}, +}; + +static struct platform_driver cdns_plat_pcie_driver = { +	.driver = { +		.name = "cdns-pcie", +		.of_match_table = cdns_plat_pcie_of_match, +		.pm	= &cdns_pcie_pm_ops, +	}, +	.probe = cdns_plat_pcie_probe, +	.shutdown = cdns_plat_pcie_shutdown, +}; +builtin_platform_driver(cdns_plat_pcie_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c new file mode 100644 index 000000000000..cd795f6fc1e2 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe controller driver. +// Author: Cyrille Pitchen <[email protected]> + +#include <linux/kernel.h> + +#include "pcie-cadence.h" + +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +				   u32 r, bool is_io, +				   u64 cpu_addr, u64 pci_addr, size_t size) +{ +	/* +	 * roundup_pow_of_two() returns an unsigned long, which is not suited +	 * for 64bit values. +	 */ +	u64 sz = 1ULL << fls64(size - 1); +	int nbits = ilog2(sz); +	u32 addr0, addr1, desc0, desc1; + +	if (nbits < 8) +		nbits = 8; + +	/* Set the PCI address */ +	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | +		(lower_32_bits(pci_addr) & GENMASK(31, 8)); +	addr1 = upper_32_bits(pci_addr); + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1); + +	/* Set the PCIe header descriptor */ +	if (is_io) +		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO; +	else +		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM; +	desc1 = 0; + +	/* +	 * Whatever Bit [23] is set or not inside DESC0 register of the outbound +	 * PCIe descriptor, the PCI function number must be set into +	 * Bits [26:24] of DESC0 anyway. +	 * +	 * In Root Complex mode, the function number is always 0 but in Endpoint +	 * mode, the PCIe controller may support more than one function. This +	 * function number needs to be set properly into the outbound PCIe +	 * descriptor. +	 * +	 * Besides, setting Bit [23] is mandatory when in Root Complex mode: +	 * then the driver must provide the bus, resp. device, number in +	 * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function +	 * number, the device number is always 0 in Root Complex mode. +	 * +	 * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence +	 * the PCIe controller will use the captured values for the bus and +	 * device numbers. +	 */ +	if (pcie->is_rc) { +		/* The device and function numbers are always 0. */ +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | +			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); +		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); +	} else { +		/* +		 * Use captured values for bus and device numbers but still +		 * need to set the function number. 
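+		 * (The controller latches the captured values from incoming
+		 * configuration requests.)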
+		 */ +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); +	} + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); + +	/* Set the CPU address */ +	cpu_addr -= pcie->mem_res->start; +	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | +		(lower_32_bits(cpu_addr) & GENMASK(31, 8)); +	addr1 = upper_32_bits(cpu_addr); + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); +} + +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +						  u32 r, u64 cpu_addr) +{ +	u32 addr0, addr1, desc0, desc1; + +	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; +	desc1 = 0; + +	/* See cdns_pcie_set_outbound_region() comments above. */ +	if (pcie->is_rc) { +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | +			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); +		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); +	} else { +		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); +	} + +	/* Set the CPU address */ +	cpu_addr -= pcie->mem_res->start; +	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | +		(lower_32_bits(cpu_addr) & GENMASK(31, 8)); +	addr1 = upper_32_bits(cpu_addr); + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); +} + +void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) +{ +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0); + +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); +} + +void cdns_pcie_disable_phy(struct cdns_pcie *pcie) +{ +	int i = pcie->phy_count; + +	while (i--) { +		phy_power_off(pcie->phy[i]); +		phy_exit(pcie->phy[i]); +	} +} + +int cdns_pcie_enable_phy(struct cdns_pcie *pcie) +{ +	int ret; +	int i; + +	for (i = 0; i < pcie->phy_count; i++) { +		ret = phy_init(pcie->phy[i]); +		if (ret < 0) +			goto err_phy; + +		ret = phy_power_on(pcie->phy[i]); +		if (ret < 0) { +			phy_exit(pcie->phy[i]); +			goto err_phy; +		} +	} + +	return 0; + +err_phy: +	while (--i >= 0) { +		phy_power_off(pcie->phy[i]); +		phy_exit(pcie->phy[i]); +	} + +	return ret; +} + +int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) +{ +	struct device_node *np = dev->of_node; +	int phy_count; +	struct phy **phy; +	struct device_link **link; +	int i; +	int ret; +	const char *name; + +	phy_count = of_property_count_strings(np, "phy-names"); +	if (phy_count < 1) { +		dev_err(dev, "no phy-names.  
PHY will not be initialized\n"); +		pcie->phy_count = 0; +		return 0; +	} + +	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); +	if (!phy) +		return -ENOMEM; + +	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); +	if (!link) +		return -ENOMEM; + +	for (i = 0; i < phy_count; i++) { +		of_property_read_string_index(np, "phy-names", i, &name); +		phy[i] = devm_phy_get(dev, name); +		if (IS_ERR(phy[i])) { +			ret = PTR_ERR(phy[i]); +			goto err_phy; +		} +		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); +		if (!link[i]) { +			devm_phy_put(dev, phy[i]); +			ret = -EINVAL; +			goto err_phy; +		} +	} + +	pcie->phy_count = phy_count; +	pcie->phy = phy; +	pcie->link = link; + +	ret =  cdns_pcie_enable_phy(pcie); +	if (ret) +		goto err_phy; + +	return 0; + +err_phy: +	while (--i >= 0) { +		device_link_del(link[i]); +		devm_phy_put(dev, phy[i]); +	} + +	return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int cdns_pcie_suspend_noirq(struct device *dev) +{ +	struct cdns_pcie *pcie = dev_get_drvdata(dev); + +	cdns_pcie_disable_phy(pcie); + +	return 0; +} + +static int cdns_pcie_resume_noirq(struct device *dev) +{ +	struct cdns_pcie *pcie = dev_get_drvdata(dev); +	int ret; + +	ret = cdns_pcie_enable_phy(pcie); +	if (ret) { +		dev_err(dev, "failed to enable phy\n"); +		return ret; +	} + +	return 0; +} +#endif + +const struct dev_pm_ops cdns_pcie_pm_ops = { +	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, +				      cdns_pcie_resume_noirq) +}; diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h new file mode 100644 index 000000000000..a2b28b912ca4 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -0,0 +1,399 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (c) 2017 Cadence +// Cadence PCIe controller driver. 
+// Author: Cyrille Pitchen <[email protected]> + +#ifndef _PCIE_CADENCE_H +#define _PCIE_CADENCE_H + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> + +/* + * Local Management Registers + */ +#define CDNS_PCIE_LM_BASE	0x00100000 + +/* Vendor ID Register */ +#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044) +#define  CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0) +#define  CDNS_PCIE_LM_ID_VENDOR_SHIFT	0 +#define  CDNS_PCIE_LM_ID_VENDOR(vid) \ +	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) +#define  CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16) +#define  CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16 +#define  CDNS_PCIE_LM_ID_SUBSYS(sub) \ +	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) + +/* Root Port Requestor ID Register */ +#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228) +#define  CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0) +#define  CDNS_PCIE_LM_RP_RID_SHIFT	0 +#define  CDNS_PCIE_LM_RP_RID_(rid) \ +	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) + +/* Endpoint Bus and Device Number Register */ +#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c) +#define  CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0) +#define  CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0 +#define  CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8) +#define  CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8 + +/* Endpoint Function f BAR b Configuration Registers */ +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ +	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ +	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) +#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ +	(GENMASK(4, 0) << ((b) * 8)) +#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ +	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) +#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ +	(GENMASK(7, 5) << ((b) * 8)) +#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ +	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) + +/* Endpoint Function Configuration Register */ +#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0) + +/* Root Complex BAR Configuration Register */ +#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ +	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK		GENMASK(8, 6) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ +	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ +	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK		GENMASK(16, 14) +#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ +	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) +#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17) +#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0 +#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18) +#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19) +#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS		0 +#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20) +#define  CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE		BIT(31) + +/* BAR control values applicable to both Endpoint Function and Root Complex */ +#define  CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0 +#define  CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS		0x1 +#define  
CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4 +#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5 +#define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6 +#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7 + + +/* + * Endpoint Function Registers (PCI configuration space for endpoint functions) + */ +#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12)) + +#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90 + +/* + * Root Port Registers (PCI configuration space for the root port function) + */ +#define CDNS_PCIE_RP_BASE	0x00200000 + + +/* + * Address Translation Registers + */ +#define CDNS_PCIE_AT_BASE	0x00400000 + +/* Region r Outbound AXI to PCIe Address Translation Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ +	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ +	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ +	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20) +#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ +	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) + +/* Region r Outbound AXI to PCIe Address Translation Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ +	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) + +/* Region r Outbound PCIe Descriptor Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ +	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK		GENMASK(3, 0) +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2 +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6 +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc +#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd +/* Bit 23 MUST be set in RC mode. 
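When set, the requester ID is taken from the DEVFN field of DESC0 and the bus field of DESC1 instead of the values captured by the link.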
*/ +#define  CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23) +#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24) +#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ +	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) + +/* Region r Outbound PCIe Descriptor Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_DESC1(r)	\ +	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020) +#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0) +#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ +	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) + +/* Region r AXI Region Base Address Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ +	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) +#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0) +#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ +	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) + +/* Region r AXI Region Base Address Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ +	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020) + +/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ +	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) +#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0) +#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ +	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ +	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) + +/* AXI link down register */ +#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824) + +enum cdns_pcie_rp_bar { +	RP_BAR0, +	RP_BAR1, +	RP_NO_BAR +}; + +/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ +	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ +	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) + +/* Normal/Vendor specific message access: offset inside some outbound region */ +#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5) +#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ +	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) +#define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8) +#define CDNS_PCIE_NORMAL_MSG_CODE(code) \ +	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) +#define CDNS_PCIE_MSG_NO_DATA			BIT(16) + +struct cdns_pcie; + +enum cdns_pcie_msg_code { +	MSG_CODE_ASSERT_INTA	= 0x20, +	MSG_CODE_ASSERT_INTB	= 0x21, +	MSG_CODE_ASSERT_INTC	= 0x22, +	MSG_CODE_ASSERT_INTD	= 0x23, +	MSG_CODE_DEASSERT_INTA	= 0x24, +	MSG_CODE_DEASSERT_INTB	= 0x25, +	MSG_CODE_DEASSERT_INTC	= 0x26, +	MSG_CODE_DEASSERT_INTD	= 0x27, +}; + +enum cdns_pcie_msg_routing { +	/* Route to Root Complex */ +	MSG_ROUTING_TO_RC, + +	/* Use Address Routing */ +	MSG_ROUTING_BY_ADDR, + +	/* Use ID Routing */ +	MSG_ROUTING_BY_ID, + +	/* Route as Broadcast Message from Root Complex */ +	MSG_ROUTING_BCAST, + +	/* Local message; terminate at receiver (INTx messages) */ +	MSG_ROUTING_LOCAL, + +	/* Gather & route to Root Complex (PME_TO_Ack message) */ +	MSG_ROUTING_GATHER, +}; + +/** + * struct cdns_pcie - private data for Cadence PCIe controller drivers + * @reg_base: IO mapped register base + * @mem_res: start/end offsets in the physical system memory to map PCI accesses + * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. 
+ * @bus: In Root Complex mode, the bus number + */ +struct cdns_pcie { +	void __iomem		*reg_base; +	struct resource		*mem_res; +	struct device		*dev; +	bool			is_rc; +	u8			bus; +	int			phy_count; +	struct phy		**phy; +	struct device_link	**link; +	const struct cdns_pcie_common_ops *ops; +}; + +/** + * struct cdns_pcie_rc - private data for this PCIe Root Complex driver + * @pcie: Cadence PCIe controller + * @dev: pointer to PCIe device + * @cfg_res: start/end offsets in the physical system memory to map PCI + *           configuration space accesses + * @bus_range: first/last buses behind the PCIe host controller + * @cfg_base: IO mapped window to access the PCI configuration space of a + *            single function at a time + * @max_regions: maximum number of regions supported by the hardware + * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address + *                translation (nbits sets into the "no BAR match" register) + * @vendor_id: PCI vendor ID + * @device_id: PCI device ID + */ +struct cdns_pcie_rc { +	struct cdns_pcie	pcie; +	struct resource		*cfg_res; +	struct resource		*bus_range; +	void __iomem		*cfg_base; +	u32			max_regions; +	u32			no_bar_nbits; +	u16			vendor_id; +	u16			device_id; +}; + +/** + * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver + * @pcie: Cadence PCIe controller + * @max_regions: maximum number of regions supported by hardware + * @ob_region_map: bitmask of mapped outbound regions + * @ob_addr: base addresses in the AXI bus where the outbound regions start + * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + *		   dedicated outbound regions is mapped. + * @irq_cpu_addr: base address in the CPU space where a write access triggers + *		  the sending of a memory write (MSI) / normal message (legacy + *		  IRQ) TLP through the PCIe bus. + * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + *		  dedicated outbound region. + * @irq_pci_fn: the latest PCI function that has updated the mapping of + *		the MSI/legacy IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted legacy IRQs. 
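+ *		(one bit per INTA..INTD; cdns_pcie_ep_assert_intx() mirrors it
+ *		into the PCI_STATUS interrupt bit)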
+ */ +struct cdns_pcie_ep { +	struct cdns_pcie	pcie; +	u32			max_regions; +	unsigned long		ob_region_map; +	phys_addr_t		*ob_addr; +	phys_addr_t		irq_phys_addr; +	void __iomem		*irq_cpu_addr; +	u64			irq_pci_addr; +	u8			irq_pci_fn; +	u8			irq_pending; +}; + + +/* Register access */ +static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) +{ +	writeb(value, pcie->reg_base + reg); +} + +static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) +{ +	writew(value, pcie->reg_base + reg); +} + +static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) +{ +	writel(value, pcie->reg_base + reg); +} + +static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) +{ +	return readl(pcie->reg_base + reg); +} + +/* Root Port register access */ +static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, +				       u32 reg, u8 value) +{ +	writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); +} + +static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, +				       u32 reg, u16 value) +{ +	writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); +} + +/* Endpoint Function register access */ +static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, +					  u32 reg, u8 value) +{ +	writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, +					  u32 reg, u16 value) +{ +	writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, +					  u32 reg, u32 value) +{ +	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ +	return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ +	return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ +	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +#ifdef CONFIG_PCIE_CADENCE_HOST +int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); +#else +static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) +{ +	return 0; +} +#endif + +#ifdef CONFIG_PCIE_CADENCE_EP +int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep); +#else +static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) +{ +	return 0; +} +#endif +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +				   u32 r, bool is_io, +				   u64 cpu_addr, u64 pci_addr, size_t size); + +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +						  u32 r, u64 cpu_addr); + +void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); +void cdns_pcie_disable_phy(struct cdns_pcie *pcie); +int cdns_pcie_enable_phy(struct cdns_pcie *pcie); +int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie); +extern const struct dev_pm_ops cdns_pcie_pm_ops; + +#endif /* _PCIE_CADENCE_H */  |