diff options
Diffstat (limited to 'drivers/pci/controller/cadence/pcie-cadence-ep.c')
| -rw-r--r-- | drivers/pci/controller/cadence/pcie-cadence-ep.c | 137 | 
1 file changed, 124 insertions(+), 13 deletions(-)
| diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1c15c8352125..254a3e1eff50 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -8,7 +8,6 @@  #include <linux/of.h>  #include <linux/pci-epc.h>  #include <linux/platform_device.h> -#include <linux/pm_runtime.h>  #include <linux/sizes.h>  #include "pcie-cadence.h" @@ -52,6 +51,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,  				struct pci_epf_bar *epf_bar)  {  	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie_epf *epf = &ep->epf[fn];  	struct cdns_pcie *pcie = &ep->pcie;  	dma_addr_t bar_phys = epf_bar->phys_addr;  	enum pci_barno bar = epf_bar->barno; @@ -112,6 +112,8 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,  		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));  	cdns_pcie_writel(pcie, reg, cfg); +	epf->epf_bar[bar] = epf_bar; +  	return 0;  } @@ -119,6 +121,7 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,  				   struct pci_epf_bar *epf_bar)  {  	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie_epf *epf = &ep->epf[fn];  	struct cdns_pcie *pcie = &ep->pcie;  	enum pci_barno bar = epf_bar->barno;  	u32 reg, cfg, b, ctrl; @@ -140,6 +143,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,  	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);  	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); + +	epf->epf_bar[bar] = NULL;  }  static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, @@ -156,7 +161,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,  		return -EINVAL;  	} -	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); +	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);  	set_bit(r, &ep->ob_region_map);  	ep->ob_addr[r] = addr; @@ 
-225,10 +230,55 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)  	return mme;  } +static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; +	u32 val, reg; + +	reg = cap + PCI_MSIX_FLAGS; +	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); +	if (!(val & PCI_MSIX_FLAGS_ENABLE)) +		return -EINVAL; + +	val &= PCI_MSIX_FLAGS_QSIZE; + +	return val; +} + +static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts, +				 enum pci_barno bir, u32 offset) +{ +	struct cdns_pcie_ep *ep = epc_get_drvdata(epc); +	struct cdns_pcie *pcie = &ep->pcie; +	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; +	u32 val, reg; + +	reg = cap + PCI_MSIX_FLAGS; +	val = cdns_pcie_ep_fn_readw(pcie, fn, reg); +	val &= ~PCI_MSIX_FLAGS_QSIZE; +	val |= interrupts; +	cdns_pcie_ep_fn_writew(pcie, fn, reg, val); + +	/* Set MSIX BAR and offset */ +	reg = cap + PCI_MSIX_TABLE; +	val = offset | bir; +	cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + +	/* Set PBA BAR and offset.  BAR must match MSIX BAR */ +	reg = cap + PCI_MSIX_PBA; +	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; +	cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + +	return 0; +} +  static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,  				     u8 intx, bool is_asserted)  {  	struct cdns_pcie *pcie = &ep->pcie; +	unsigned long flags;  	u32 offset;  	u16 status;  	u8 msg_code; @@ -239,7 +289,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,  	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||  		     ep->irq_pci_fn != fn)) {  		/* First region was reserved for IRQ writes. 
*/ -		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, +		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,  							     ep->irq_phys_addr);  		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;  		ep->irq_pci_fn = fn; @@ -253,11 +303,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,  		msg_code = MSG_CODE_DEASSERT_INTA + intx;  	} +	spin_lock_irqsave(&ep->lock, flags);  	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);  	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {  		status ^= PCI_STATUS_INTERRUPT;  		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);  	} +	spin_unlock_irqrestore(&ep->lock, flags);  	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |  		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | @@ -318,7 +370,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,  	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||  		     ep->irq_pci_fn != fn)) {  		/* First region was reserved for IRQ writes. */ -		cdns_pcie_set_outbound_region(pcie, fn, 0, +		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,  					      false,  					      ep->irq_phys_addr,  					      pci_addr & ~pci_addr_mask, @@ -331,6 +383,51 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,  	return 0;  } +static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, +				      u16 interrupt_num) +{ +	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; +	u32 tbl_offset, msg_data, reg; +	struct cdns_pcie *pcie = &ep->pcie; +	struct pci_epf_msix_tbl *msix_tbl; +	struct cdns_pcie_epf *epf; +	u64 pci_addr_mask = 0xff; +	u64 msg_addr; +	u16 flags; +	u8 bir; + +	/* Check whether the MSI-X feature has been enabled by the PCI host. 
*/ +	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); +	if (!(flags & PCI_MSIX_FLAGS_ENABLE)) +		return -EINVAL; + +	reg = cap + PCI_MSIX_TABLE; +	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg); +	bir = tbl_offset & PCI_MSIX_TABLE_BIR; +	tbl_offset &= PCI_MSIX_TABLE_OFFSET; + +	epf = &ep->epf[fn]; +	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; +	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; +	msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + +	/* Set the outbound region if needed. */ +	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || +	    ep->irq_pci_fn != fn) { +		/* First region was reserved for IRQ writes. */ +		cdns_pcie_set_outbound_region(pcie, 0, fn, 0, +					      false, +					      ep->irq_phys_addr, +					      msg_addr & ~pci_addr_mask, +					      pci_addr_mask + 1); +		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); +		ep->irq_pci_fn = fn; +	} +	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); + +	return 0; +} +  static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,  				  enum pci_epc_irq_type type,  				  u16 interrupt_num) @@ -344,6 +441,9 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,  	case PCI_EPC_IRQ_MSI:  		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); +	case PCI_EPC_IRQ_MSIX: +		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num); +  	default:  		break;  	} @@ -355,8 +455,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)  {  	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);  	struct cdns_pcie *pcie = &ep->pcie; +	struct device *dev = pcie->dev;  	struct pci_epf *epf;  	u32 cfg; +	int ret;  	/*  	 * BIT(0) is hardwired to 1, hence function 0 is always enabled @@ -367,13 +469,19 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)  		cfg |= BIT(epf->func_no);  	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); +	ret = cdns_pcie_start_link(pcie); +	if (ret) { +		dev_err(dev, "Failed to start link\n"); +		return ret; +	} +  	return 0;  }  
static const struct pci_epc_features cdns_pcie_epc_features = {  	.linkup_notifier = false,  	.msi_capable = true, -	.msix_capable = false, +	.msix_capable = true,  };  static const struct pci_epc_features* @@ -390,6 +498,8 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {  	.unmap_addr	= cdns_pcie_ep_unmap_addr,  	.set_msi	= cdns_pcie_ep_set_msi,  	.get_msi	= cdns_pcie_ep_get_msi, +	.set_msix	= cdns_pcie_ep_set_msix, +	.get_msix	= cdns_pcie_ep_get_msix,  	.raise_irq	= cdns_pcie_ep_raise_irq,  	.start		= cdns_pcie_ep_start,  	.get_features	= cdns_pcie_ep_get_features, @@ -408,8 +518,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)  	pcie->is_rc = false; -	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); -	pcie->reg_base = devm_ioremap_resource(dev, res); +	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");  	if (IS_ERR(pcie->reg_base)) {  		dev_err(dev, "missing \"reg\"\n");  		return PTR_ERR(pcie->reg_base); @@ -440,8 +549,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)  	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);  	if (IS_ERR(epc)) {  		dev_err(dev, "failed to create epc device\n"); -		ret = PTR_ERR(epc); -		goto err_init; +		return PTR_ERR(epc);  	}  	epc_set_drvdata(epc, ep); @@ -449,11 +557,16 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)  	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)  		epc->max_functions = 1; +	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), +			       GFP_KERNEL); +	if (!ep->epf) +		return -ENOMEM; +  	ret = pci_epc_mem_init(epc, pcie->mem_res->start,  			       resource_size(pcie->mem_res), PAGE_SIZE);  	if (ret < 0) {  		dev_err(dev, "failed to initialize the memory space\n"); -		goto err_init; +		return ret;  	}  	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, @@ -466,14 +579,12 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)  	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;  	/* Reserve region 0 for IRQs 
*/  	set_bit(0, &ep->ob_region_map); +	spin_lock_init(&ep->lock);  	return 0;   free_epc_mem:  	pci_epc_mem_exit(epc); - err_init: -	pm_runtime_put_sync(dev); -  	return ret;  } |