Diffstat (limited to 'drivers/pci/controller/dwc/pcie-designware.c')
 drivers/pci/controller/dwc/pcie-designware.c | 205 ++++++++++++++++++++++++++-
 1 file changed, 200 insertions(+), 5 deletions(-)
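Note: the hunks below let a DWC glue driver pre-populate the pci->edma fields before the core code runs, since dw_pcie_get_resources() only looks up the "dma" platform resource when pci->edma.reg_base is still unset and dw_pcie_edma_irq_verify() accepts a pre-set nr_irqs. As a purely illustrative sketch (the probe function, the "edma-csr" resource name, and the surrounding driver are assumptions, not part of this commit), a vendor driver could pre-map the eDMA CSR window and declare a single shared IRQ like this; the full diff follows below.

/* Hypothetical glue-driver fragment -- illustration only, not part of this commit */
#include <linux/err.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static int example_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;

	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = &pdev->dev;

	/*
	 * Pre-set the eDMA CSR base from a vendor-specific window ("edma-csr"
	 * is an assumed resource name): dw_pcie_get_resources() skips its own
	 * "dma" resource lookup when pci->edma.reg_base is already non-NULL.
	 */
	pci->edma.reg_base = devm_platform_ioremap_resource_byname(pdev, "edma-csr");
	if (IS_ERR(pci->edma.reg_base))
		return PTR_ERR(pci->edma.reg_base);

	/* One shared IRQ for all channels; dw_pcie_edma_irq_verify() takes this as-is */
	pci->edma.nr_irqs = 1;

	/* ... usual DBI/ATU setup, then host or endpoint initialization ... */

	return 0;
}

With the base pre-mapped, dw_pcie_edma_detect() would pick it up and, on controllers reporting the unrolled register mapping, use it directly instead of the legacy DBI viewport.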
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d5d619ab2e9..8e33e6e59e68 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -12,6 +12,7 @@
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dma/edma.h>
 #include <linux/gpio/consumer.h>
 #include <linux/ioport.h>
 #include <linux/of.h>
@@ -142,6 +143,18 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
 	if (!pci->atu_size)
 		pci->atu_size = SZ_4K;
 
+	/* eDMA region can be mapped to a custom base address */
+	if (!pci->edma.reg_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+		if (res) {
+			pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+			if (IS_ERR(pci->edma.reg_base))
+				return PTR_ERR(pci->edma.reg_base);
+		} else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+			pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+		}
+	}
+
 	/* LLDD is supposed to manually switch the clocks and resets state */
 	if (dw_pcie_cap_is(pci, REQ_RES)) {
 		ret = dw_pcie_get_clocks(pci);
@@ -782,6 +795,188 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
 		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
 }
 
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+	u32 val = 0;
+	int ret;
+
+	if (pci->ops && pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+	ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+	if (ret)
+		dev_err(pci->dev, "Read DMA address failed\n");
+
+	return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	char name[6];
+	int ret;
+
+	if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+		return -EINVAL;
+
+	ret = platform_get_irq_byname_optional(pdev, "dma");
+	if (ret > 0)
+		return ret;
+
+	snprintf(name, sizeof(name), "dma%u", nr);
+
+	return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_core_ops dw_pcie_edma_ops = {
+	.irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+	u32 val;
+
+	/*
+	 * Indirect eDMA CSRs access has been completely removed since v5.40a
+	 * thus no space is now reserved for the eDMA channels viewport and
+	 * former DMA CTRL register is no longer fixed to FFs.
+	 */
+	if (dw_pcie_ver_is_ge(pci, 540A))
+		val = 0xFFFFFFFF;
+	else
+		val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+	if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+		pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+
+		val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+	} else if (val != 0xFFFFFFFF) {
+		pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+		pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+	} else {
+		return -ENODEV;
+	}
+
+	pci->edma.dev = pci->dev;
+
+	if (!pci->edma.ops)
+		pci->edma.ops = &dw_pcie_edma_ops;
+
+	pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+
+	pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+	pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+
+	/* Sanity check the channels count if the mapping was incorrect */
+	if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+	    !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+	struct platform_device *pdev = to_platform_device(pci->dev);
+	u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+	char name[6];
+	int ret;
+
+	if (pci->edma.nr_irqs == 1)
+		return 0;
+	else if (pci->edma.nr_irqs > 1)
+		return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+	ret = platform_get_irq_byname_optional(pdev, "dma");
+	if (ret > 0) {
+		pci->edma.nr_irqs = 1;
+		return 0;
+	}
+
+	for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+		snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+		ret = platform_get_irq_byname_optional(pdev, name);
+		if (ret <= 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+	struct dw_edma_region *ll;
+	dma_addr_t paddr;
+	int i;
+
+	for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+		ll = &pci->edma.ll_region_wr[i];
+		ll->sz = DMA_LLP_MEM_SIZE;
+		ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+						    &paddr, GFP_KERNEL);
+		if (!ll->vaddr.mem)
+			return -ENOMEM;
+
+		ll->paddr = paddr;
+	}
+
+	for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+		ll = &pci->edma.ll_region_rd[i];
+		ll->sz = DMA_LLP_MEM_SIZE;
+		ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+						    &paddr, GFP_KERNEL);
+		if (!ll->vaddr.mem)
+			return -ENOMEM;
+
+		ll->paddr = paddr;
+	}
+
+	return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+	int ret;
+
+	/* Don't fail if no eDMA was found (for the backward compatibility) */
+	ret = dw_pcie_edma_find_chip(pci);
+	if (ret)
+		return 0;
+
+	/* Don't fail on the IRQs verification (for the backward compatibility) */
+	ret = dw_pcie_edma_irq_verify(pci);
+	if (ret) {
+		dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+		return 0;
+	}
+
+	ret = dw_pcie_edma_ll_alloc(pci);
+	if (ret) {
+		dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+		return ret;
+	}
+
+	/* Don't fail if the DW eDMA driver can't find the device */
+	ret = dw_edma_probe(&pci->edma);
+	if (ret && ret != -ENODEV) {
+		dev_err(pci->dev, "Couldn't register eDMA device\n");
+		return ret;
+	}
+
+	dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
		 pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
		 pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+	return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+	dw_edma_remove(&pci->edma);
+}
+
 void dw_pcie_setup(struct dw_pcie *pci)
 {
 	u32 val;
@@ -806,11 +1001,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
 		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
 	}
 
-	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
-	val &= ~PORT_LINK_FAST_LINK_MODE;
-	val |= PORT_LINK_DLL_LINK_EN;
-	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
-
 	if (dw_pcie_cap_is(pci, CDM_CHECK)) {
 		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
 		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
@@ -818,6 +1008,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
 		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
 	}
 
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+	val &= ~PORT_LINK_FAST_LINK_MODE;
+	val |= PORT_LINK_DLL_LINK_EN;
+	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
 	if (!pci->num_lanes) {
 		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
 		return;