Diffstat (limited to 'drivers/pci/controller/plda')
-rw-r--r--   drivers/pci/controller/plda/Kconfig                 |  30
-rw-r--r--   drivers/pci/controller/plda/Makefile                |   4
-rw-r--r--   drivers/pci/controller/plda/pcie-microchip-host.c   | 729
-rw-r--r--   drivers/pci/controller/plda/pcie-plda-host.c        | 651
-rw-r--r--   drivers/pci/controller/plda/pcie-plda.h             | 273
-rw-r--r--   drivers/pci/controller/plda/pcie-starfive.c         | 488
6 files changed, 2175 insertions, 0 deletions
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig new file mode 100644 index 000000000000..c0e14146d7e4 --- /dev/null +++ b/drivers/pci/controller/plda/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "PLDA-based PCIe controllers" + depends on PCI + +config PCIE_PLDA_HOST + bool + +config PCIE_MICROCHIP_HOST + tristate "Microchip AXI PCIe controller" + depends on PCI_MSI && OF + select PCI_HOST_COMMON + select PCIE_PLDA_HOST + help + Say Y here if you want kernel to support the Microchip AXI PCIe + Host Bridge driver. + +config PCIE_STARFIVE_HOST + tristate "StarFive PCIe host controller" + depends on PCI_MSI && OF + depends on ARCH_STARFIVE || COMPILE_TEST + select PCIE_PLDA_HOST + help + Say Y here if you want to support the StarFive PCIe controller in + host mode. StarFive PCIe controller uses PLDA PCIe core. + + If you choose to build this driver as module it will be dynamically + linked and module will be called pcie-starfive.ko. + +endmenu diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile new file mode 100644 index 000000000000..0ac6851bed48 --- /dev/null +++ b/drivers/pci/controller/plda/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o +obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o +obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c new file mode 100644 index 000000000000..48f60a04b740 --- /dev/null +++ b/drivers/pci/controller/plda/pcie-microchip-host.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip AXI PCIe Bridge host controller driver + * + * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved. 
+ * + * Author: Daire McNamara <daire.mcnamara@microchip.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/pci-ecam.h> +#include <linux/platform_device.h> + +#include "../../pci.h" +#include "pcie-plda.h" + +/* PCIe Bridge Phy and Controller Phy offsets */ +#define MC_PCIE1_BRIDGE_ADDR 0x00008000u +#define MC_PCIE1_CTRL_ADDR 0x0000a000u + +#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR) +#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR) + +/* PCIe Controller Phy Regs */ +#define SEC_ERROR_EVENT_CNT 0x20 +#define DED_ERROR_EVENT_CNT 0x24 +#define SEC_ERROR_INT 0x28 +#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0) +#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4) +#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8) +#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12) +#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0) +#define NUM_SEC_ERROR_INTS (4) +#define SEC_ERROR_INT_MASK 0x2c +#define DED_ERROR_INT 0x30 +#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0) +#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4) +#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8) +#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12) +#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0) +#define NUM_DED_ERROR_INTS (4) +#define DED_ERROR_INT_MASK 0x34 +#define ECC_CONTROL 0x38 +#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0) +#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1) +#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2) +#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3) +#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4) +#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5) +#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6) +#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7) +#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8) +#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9) +#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10) +#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11) +#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12) +#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13) +#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14) +#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15) +#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24) +#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25) +#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26) +#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27) +#define PCIE_EVENT_INT 0x14c +#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0) +#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1) +#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2) +#define PCIE_EVENT_INT_MASK GENMASK(2, 0) +#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16) +#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17) +#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18) +#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16) +#define PCIE_EVENT_INT_ENB_SHIFT 16 +#define NUM_PCIE_EVENTS (3) + +/* PCIe Config space MSI capability structure */ +#define MC_MSI_CAP_CTRL_OFFSET 0xe0u + +/* Events */ +#define EVENT_PCIE_L2_EXIT 0 +#define EVENT_PCIE_HOTRST_EXIT 1 +#define EVENT_PCIE_DLUP_EXIT 2 +#define EVENT_SEC_TX_RAM_SEC_ERR 3 +#define EVENT_SEC_RX_RAM_SEC_ERR 4 +#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5 +#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6 +#define EVENT_DED_TX_RAM_DED_ERR 7 +#define EVENT_DED_RX_RAM_DED_ERR 8 +#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9 +#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 
10 +#define EVENT_LOCAL_DMA_END_ENGINE_0 11 +#define EVENT_LOCAL_DMA_END_ENGINE_1 12 +#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13 +#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14 +#define NUM_MC_EVENTS 15 +#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR) +#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR) +#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR) +#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL) +#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR) +#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR) +#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR) +#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL) +#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX) +#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI) +#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT) +#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS) +#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR) +#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM) + +#define PCIE_EVENT_CAUSE(x, s) \ + [EVENT_PCIE_ ## x] = { __stringify(x), s } + +#define SEC_ERROR_CAUSE(x, s) \ + [EVENT_SEC_ ## x] = { __stringify(x), s } + +#define DED_ERROR_CAUSE(x, s) \ + [EVENT_DED_ ## x] = { __stringify(x), s } + +#define LOCAL_EVENT_CAUSE(x, s) \ + [EVENT_LOCAL_ ## x] = { __stringify(x), s } + +#define PCIE_EVENT(x) \ + .base = MC_PCIE_CTRL_ADDR, \ + .offset = PCIE_EVENT_INT, \ + .mask_offset = PCIE_EVENT_INT, \ + .mask_high = 1, \ + .mask = PCIE_EVENT_INT_ ## x ## _INT, \ + .enb_mask = PCIE_EVENT_INT_ENB_MASK + +#define SEC_EVENT(x) \ + .base = MC_PCIE_CTRL_ADDR, \ + .offset = SEC_ERROR_INT, \ + .mask_offset = SEC_ERROR_INT_MASK, \ + .mask = SEC_ERROR_INT_ ## x ## _INT, \ + .mask_high = 1, \ + .enb_mask = 0 + +#define DED_EVENT(x) \ + .base = MC_PCIE_CTRL_ADDR, \ + .offset = DED_ERROR_INT, \ + .mask_offset = DED_ERROR_INT_MASK, \ + .mask_high = 1, \ + .mask = DED_ERROR_INT_ ## x ## _INT, \ + .enb_mask = 0 + +#define LOCAL_EVENT(x) \ + .base = MC_PCIE_BRIDGE_ADDR, \ + .offset = ISTATUS_LOCAL, \ + .mask_offset = IMASK_LOCAL, \ + .mask_high = 0, \ + .mask = x ## _MASK, \ + .enb_mask = 0 + +#define PCIE_EVENT_TO_EVENT_MAP(x) \ + { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x } + +#define SEC_ERROR_TO_EVENT_MAP(x) \ + { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x } + +#define DED_ERROR_TO_EVENT_MAP(x) \ + { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x } + +#define LOCAL_STATUS_TO_EVENT_MAP(x) \ + { x ## _MASK, EVENT_LOCAL_ ## x } + +struct event_map { + u32 reg_mask; + u32 event_bit; +}; + + +struct mc_pcie { + struct plda_pcie_rp plda; + void __iomem *axi_base_addr; +}; + +struct cause { + const char *sym; + const char *str; +}; + +static const struct cause event_cause[NUM_EVENTS] = { + PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"), + PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"), + PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"), + SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"), + SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"), + SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"), + SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"), + DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"), + DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"), + DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, 
"ded error in pcie2axi buffer"), + DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"), + LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"), + LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"), + LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"), + LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"), + LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"), + LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"), + LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"), + LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"), + LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"), + LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"), + LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"), +}; + +static struct event_map pcie_event_to_event[] = { + PCIE_EVENT_TO_EVENT_MAP(L2_EXIT), + PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT), + PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT), +}; + +static struct event_map sec_error_to_event[] = { + SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR), + SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR), + SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR), + SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR), +}; + +static struct event_map ded_error_to_event[] = { + DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR), + DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR), + DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR), + DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR), +}; + +static struct event_map local_status_to_event[] = { + LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0), + LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1), + LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0), + LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1), + LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR), + LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR), + LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR), + LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL), + LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR), + LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR), + LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR), + LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL), + LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX), + LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI), + LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT), + LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS), + LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR), +}; + +static struct { + u32 base; + u32 offset; + u32 mask; + u32 shift; + u32 enb_mask; + u32 mask_high; + u32 mask_offset; +} event_descs[] = { + { PCIE_EVENT(L2_EXIT) }, + { PCIE_EVENT(HOTRST_EXIT) }, + { PCIE_EVENT(DLUP_EXIT) }, + { SEC_EVENT(TX_RAM_SEC_ERR) }, + { SEC_EVENT(RX_RAM_SEC_ERR) }, + { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) }, + { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) }, + { DED_EVENT(TX_RAM_DED_ERR) }, + { DED_EVENT(RX_RAM_DED_ERR) }, + { DED_EVENT(PCIE2AXI_RAM_DED_ERR) }, + { DED_EVENT(AXI2PCIE_RAM_DED_ERR) }, + { LOCAL_EVENT(DMA_END_ENGINE_0) }, + { LOCAL_EVENT(DMA_END_ENGINE_1) }, + { LOCAL_EVENT(DMA_ERROR_ENGINE_0) }, + { LOCAL_EVENT(DMA_ERROR_ENGINE_1) }, + { LOCAL_EVENT(A_ATR_EVT_POST_ERR) }, + { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) }, + { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) }, + { LOCAL_EVENT(A_ATR_EVT_DOORBELL) }, + { LOCAL_EVENT(P_ATR_EVT_POST_ERR) }, + { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) }, + { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) }, + { LOCAL_EVENT(P_ATR_EVT_DOORBELL) }, + { LOCAL_EVENT(PM_MSI_INT_INTX) }, + { LOCAL_EVENT(PM_MSI_INT_MSI) }, + { LOCAL_EVENT(PM_MSI_INT_AER_EVT) }, + { LOCAL_EVENT(PM_MSI_INT_EVENTS) }, + { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) }, +}; + +static char 
poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" }; + +static struct mc_pcie *port; + +static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam) +{ + struct plda_msi *msi = &port->plda.msi; + u16 reg; + u8 queue_size; + + /* Fixup MSI enable flag */ + reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); + reg |= PCI_MSI_FLAGS_ENABLE; + writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); + + /* Fixup PCI MSI queue flags */ + queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg); + reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size); + writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); + + /* Fixup MSI addr fields */ + writel_relaxed(lower_32_bits(msi->vector_phy), + ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO); + writel_relaxed(upper_32_bits(msi->vector_phy), + ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI); +} + +static inline u32 reg_to_event(u32 reg, struct event_map field) +{ + return (reg & field.reg_mask) ? BIT(field.event_bit) : 0; +} + +static u32 pcie_events(struct mc_pcie *port) +{ + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT); + u32 val = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++) + val |= reg_to_event(reg, pcie_event_to_event[i]); + + return val; +} + +static u32 sec_errors(struct mc_pcie *port) +{ + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT); + u32 val = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++) + val |= reg_to_event(reg, sec_error_to_event[i]); + + return val; +} + +static u32 ded_errors(struct mc_pcie *port) +{ + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT); + u32 val = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++) + val |= reg_to_event(reg, ded_error_to_event[i]); + + return val; +} + +static u32 local_events(struct mc_pcie *port) +{ + void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; + u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); + u32 val = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++) + val |= reg_to_event(reg, local_status_to_event[i]); + + return val; +} + +static u32 mc_get_events(struct plda_pcie_rp *port) +{ + struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda); + u32 events = 0; + + events |= pcie_events(mc_port); + events |= sec_errors(mc_port); + events |= ded_errors(mc_port); + events |= local_events(mc_port); + + return events; +} + +static irqreturn_t mc_event_handler(int irq, void *dev_id) +{ + struct plda_pcie_rp *port = dev_id; + struct device *dev = port->dev; + struct irq_data *data; + + data = irq_domain_get_irq_data(port->event_domain, irq); + + if (event_cause[data->hwirq].str) + dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str); + else + dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq); + + return IRQ_HANDLED; +} + +static void mc_ack_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda); + u32 event = data->hwirq; + void __iomem *addr; + u32 mask; + + addr = mc_port->axi_base_addr + event_descs[event].base + + event_descs[event].offset; + mask = event_descs[event].mask; + mask |= event_descs[event].enb_mask; + + writel_relaxed(mask, 
addr); +} + +static void mc_mask_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda); + u32 event = data->hwirq; + void __iomem *addr; + u32 mask; + u32 val; + + addr = mc_port->axi_base_addr + event_descs[event].base + + event_descs[event].mask_offset; + mask = event_descs[event].mask; + if (event_descs[event].enb_mask) { + mask <<= PCIE_EVENT_INT_ENB_SHIFT; + mask &= PCIE_EVENT_INT_ENB_MASK; + } + + if (!event_descs[event].mask_high) + mask = ~mask; + + raw_spin_lock(&port->lock); + val = readl_relaxed(addr); + if (event_descs[event].mask_high) + val |= mask; + else + val &= mask; + + writel_relaxed(val, addr); + raw_spin_unlock(&port->lock); +} + +static void mc_unmask_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda); + u32 event = data->hwirq; + void __iomem *addr; + u32 mask; + u32 val; + + addr = mc_port->axi_base_addr + event_descs[event].base + + event_descs[event].mask_offset; + mask = event_descs[event].mask; + + if (event_descs[event].enb_mask) + mask <<= PCIE_EVENT_INT_ENB_SHIFT; + + if (event_descs[event].mask_high) + mask = ~mask; + + if (event_descs[event].enb_mask) + mask &= PCIE_EVENT_INT_ENB_MASK; + + raw_spin_lock(&port->lock); + val = readl_relaxed(addr); + if (event_descs[event].mask_high) + val &= mask; + else + val |= mask; + writel_relaxed(val, addr); + raw_spin_unlock(&port->lock); +} + +static struct irq_chip mc_event_irq_chip = { + .name = "Microchip PCIe EVENT", + .irq_ack = mc_ack_event_irq, + .irq_mask = mc_mask_event_irq, + .irq_unmask = mc_unmask_event_irq, +}; + +static inline void mc_pcie_deinit_clk(void *data) +{ + struct clk *clk = data; + + clk_disable_unprepare(clk); +} + +static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id) +{ + struct clk *clk; + int ret; + + clk = devm_clk_get_optional(dev, id); + if (IS_ERR(clk)) + return clk; + if (!clk) + return clk; + + ret = clk_prepare_enable(clk); + if (ret) + return ERR_PTR(ret); + + devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk); + + return clk; +} + +static int mc_pcie_init_clks(struct device *dev) +{ + int i; + struct clk *fic; + + /* + * PCIe may be clocked via Fabric Interface using between 1 and 4 + * clocks. 
Scan DT for clocks and enable them if present + */ + for (i = 0; i < ARRAY_SIZE(poss_clks); i++) { + fic = mc_pcie_init_clk(dev, poss_clks[i]); + if (IS_ERR(fic)) + return PTR_ERR(fic); + } + + return 0; +} + +static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq, + int event) +{ + return devm_request_irq(plda->dev, event_irq, mc_event_handler, + 0, event_cause[event].sym, plda); +} + +static const struct plda_event_ops mc_event_ops = { + .get_events = mc_get_events, +}; + +static const struct plda_event mc_event = { + .request_event_irq = mc_request_event_irq, + .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX, + .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI, +}; + +static inline void mc_clear_secs(struct mc_pcie *port) +{ + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + + writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + + SEC_ERROR_INT); + writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT); +} + +static inline void mc_clear_deds(struct mc_pcie *port) +{ + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + + writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + + DED_ERROR_INT); + writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT); +} + +static void mc_disable_interrupts(struct mc_pcie *port) +{ + void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; + void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; + u32 val; + + /* Ensure ECC bypass is enabled */ + val = ECC_CONTROL_TX_RAM_ECC_BYPASS | + ECC_CONTROL_RX_RAM_ECC_BYPASS | + ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS | + ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS; + writel_relaxed(val, ctrl_base_addr + ECC_CONTROL); + + /* Disable SEC errors and clear any outstanding */ + writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + + SEC_ERROR_INT_MASK); + mc_clear_secs(port); + + /* Disable DED errors and clear any outstanding */ + writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + + DED_ERROR_INT_MASK); + mc_clear_deds(port); + + /* Disable local interrupts and clear any outstanding */ + writel_relaxed(0, bridge_base_addr + IMASK_LOCAL); + writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL); + writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI); + + /* Disable PCIe events and clear any outstanding */ + val = PCIE_EVENT_INT_L2_EXIT_INT | + PCIE_EVENT_INT_HOTRST_EXIT_INT | + PCIE_EVENT_INT_DLUP_EXIT_INT | + PCIE_EVENT_INT_L2_EXIT_INT_MASK | + PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK | + PCIE_EVENT_INT_DLUP_EXIT_INT_MASK; + writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT); + + /* Disable host interrupts and clear any outstanding */ + writel_relaxed(0, bridge_base_addr + IMASK_HOST); + writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST); +} + +static int mc_platform_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct platform_device *pdev = to_platform_device(dev); + struct pci_host_bridge *bridge = platform_get_drvdata(pdev); + void __iomem *bridge_base_addr = + port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; + int ret; + + /* Configure address translation table 0 for PCIe config space */ + plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start, + cfg->res.start, + resource_size(&cfg->res)); + + /* Need some fixups in config space */ + mc_pcie_enable_msi(port, cfg->win); + + /* Configure non-config space outbound ranges */ + ret = plda_pcie_setup_iomems(bridge, &port->plda); + if (ret) + return ret; + + port->plda.event_ops = 
&mc_event_ops; + port->plda.event_irq_chip = &mc_event_irq_chip; + port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0); + + /* Address translation is up; safe to enable interrupts */ + ret = plda_init_interrupts(pdev, &port->plda, &mc_event); + if (ret) + return ret; + + return 0; +} + +static int mc_host_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + void __iomem *bridge_base_addr; + struct plda_pcie_rp *plda; + int ret; + u32 val; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + plda = &port->plda; + plda->dev = dev; + + port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(port->axi_base_addr)) + return PTR_ERR(port->axi_base_addr); + + mc_disable_interrupts(port); + + bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; + plda->bridge_addr = bridge_base_addr; + plda->num_events = NUM_EVENTS; + + /* Allow enabling MSI by disabling MSI-X */ + val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); + val &= ~MSIX_CAP_MASK; + writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0); + + /* Pick num vectors from bitfile programmed onto FPGA fabric */ + val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); + val &= NUM_MSI_MSGS_MASK; + val >>= NUM_MSI_MSGS_SHIFT; + + plda->msi.num_vectors = 1 << val; + + /* Pick vector address from design */ + plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR); + + ret = mc_pcie_init_clks(dev); + if (ret) { + dev_err(dev, "failed to get clock resources, error %d\n", ret); + return -ENODEV; + } + + return pci_host_common_probe(pdev); +} + +static const struct pci_ecam_ops mc_ecam_ops = { + .init = mc_platform_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +static const struct of_device_id mc_pcie_of_match[] = { + { + .compatible = "microchip,pcie-host-1.0", + .data = &mc_ecam_ops, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mc_pcie_of_match); + +static struct platform_driver mc_pcie_driver = { + .probe = mc_host_probe, + .driver = { + .name = "microchip-pcie", + .of_match_table = mc_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(mc_pcie_driver); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Microchip PCIe host controller driver"); +MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>"); diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c new file mode 100644 index 000000000000..a18923d7cea6 --- /dev/null +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PLDA PCIe XpressRich host controller driver + * + * Copyright (C) 2023 Microchip Co. Ltd + * StarFive Co. 
Ltd + * + * Author: Daire McNamara <daire.mcnamara@microchip.com> + */ + +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/pci_regs.h> +#include <linux/pci-ecam.h> + +#include "pcie-plda.h" + +void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct plda_pcie_rp *pcie = bus->sysdata; + + return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); +} +EXPORT_SYMBOL_GPL(plda_pcie_map_bus); + +static void plda_handle_msi(struct irq_desc *desc) +{ + struct plda_pcie_rp *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct device *dev = port->dev; + struct plda_msi *msi = &port->msi; + void __iomem *bridge_base_addr = port->bridge_addr; + unsigned long status; + u32 bit; + int ret; + + chained_irq_enter(chip, desc); + + status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); + if (status & PM_MSI_INT_MSI_MASK) { + writel_relaxed(status & PM_MSI_INT_MSI_MASK, + bridge_base_addr + ISTATUS_LOCAL); + status = readl_relaxed(bridge_base_addr + ISTATUS_MSI); + for_each_set_bit(bit, &status, msi->num_vectors) { + ret = generic_handle_domain_irq(msi->dev_domain, bit); + if (ret) + dev_err_ratelimited(dev, "bad MSI IRQ %d\n", + bit); + } + } + + chained_irq_exit(chip, desc); +} + +static void plda_msi_bottom_irq_ack(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + void __iomem *bridge_base_addr = port->bridge_addr; + u32 bitpos = data->hwirq; + + writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI); +} + +static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + phys_addr_t addr = port->msi.vector_phy; + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq; + + dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int plda_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip plda_msi_bottom_irq_chip = { + .name = "PLDA MSI", + .irq_ack = plda_msi_bottom_irq_ack, + .irq_compose_msi_msg = plda_compose_msi_msg, + .irq_set_affinity = plda_msi_set_affinity, +}; + +static int plda_irq_msi_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *args) +{ + struct plda_pcie_rp *port = domain->host_data; + struct plda_msi *msi = &port->msi; + unsigned long bit; + + mutex_lock(&msi->lock); + bit = find_first_zero_bit(msi->used, msi->num_vectors); + if (bit >= msi->num_vectors) { + mutex_unlock(&msi->lock); + return -ENOSPC; + } + + set_bit(bit, msi->used); + + irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip, + domain->host_data, handle_edge_irq, NULL, NULL); + + mutex_unlock(&msi->lock); + + return 0; +} + +static void plda_irq_msi_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d); + struct plda_msi *msi = &port->msi; + + mutex_lock(&msi->lock); + + if (test_bit(d->hwirq, msi->used)) + __clear_bit(d->hwirq, msi->used); + else + dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq); + + mutex_unlock(&msi->lock); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = 
plda_irq_msi_domain_alloc, + .free = plda_irq_msi_domain_free, +}; + +static struct irq_chip plda_msi_irq_chip = { + .name = "PLDA PCIe MSI", + .irq_ack = irq_chip_ack_parent, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info plda_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &plda_msi_irq_chip, +}; + +static int plda_allocate_msi_domains(struct plda_pcie_rp *port) +{ + struct device *dev = port->dev; + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct plda_msi *msi = &port->msi; + + mutex_init(&port->msi.lock); + + msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors, + &msi_domain_ops, port); + if (!msi->dev_domain) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + msi->msi_domain = pci_msi_create_irq_domain(fwnode, + &plda_msi_domain_info, + msi->dev_domain); + if (!msi->msi_domain) { + dev_err(dev, "failed to create MSI domain\n"); + irq_domain_remove(msi->dev_domain); + return -ENOMEM; + } + + return 0; +} + +static void plda_handle_intx(struct irq_desc *desc) +{ + struct plda_pcie_rp *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct device *dev = port->dev; + void __iomem *bridge_base_addr = port->bridge_addr; + unsigned long status; + u32 bit; + int ret; + + chained_irq_enter(chip, desc); + + status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); + if (status & PM_MSI_INT_INTX_MASK) { + status &= PM_MSI_INT_INTX_MASK; + status >>= PM_MSI_INT_INTX_SHIFT; + for_each_set_bit(bit, &status, PCI_NUM_INTX) { + ret = generic_handle_domain_irq(port->intx_domain, bit); + if (ret) + dev_err_ratelimited(dev, "bad INTx IRQ %d\n", + bit); + } + } + + chained_irq_exit(chip, desc); +} + +static void plda_ack_intx_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + void __iomem *bridge_base_addr = port->bridge_addr; + u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); + + writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL); +} + +static void plda_mask_intx_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + void __iomem *bridge_base_addr = port->bridge_addr; + unsigned long flags; + u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = readl_relaxed(bridge_base_addr + IMASK_LOCAL); + val &= ~mask; + writel_relaxed(val, bridge_base_addr + IMASK_LOCAL); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void plda_unmask_intx_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + void __iomem *bridge_base_addr = port->bridge_addr; + unsigned long flags; + u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = readl_relaxed(bridge_base_addr + IMASK_LOCAL); + val |= mask; + writel_relaxed(val, bridge_base_addr + IMASK_LOCAL); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip plda_intx_irq_chip = { + .name = "PLDA PCIe INTx", + .irq_ack = plda_ack_intx_irq, + .irq_mask = plda_mask_intx_irq, + .irq_unmask = plda_unmask_intx_irq, +}; + +static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + 
+static const struct irq_domain_ops intx_domain_ops = { + .map = plda_pcie_intx_map, +}; + +static u32 plda_get_events(struct plda_pcie_rp *port) +{ + u32 events, val, origin; + + origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL); + + /* MSI event and sys events */ + val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT; + events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1); + + /* INTx events */ + if (origin & PM_MSI_INT_INTX_MASK) + events |= BIT(PM_MSI_INT_INTX_SHIFT); + + /* remains are same with register */ + events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0); + + return events; +} + +static irqreturn_t plda_event_handler(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static void plda_handle_event(struct irq_desc *desc) +{ + struct plda_pcie_rp *port = irq_desc_get_handler_data(desc); + unsigned long events; + u32 bit; + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + + events = port->event_ops->get_events(port); + + events &= port->events_bitmap; + for_each_set_bit(bit, &events, port->num_events) + generic_handle_domain_irq(port->event_domain, bit); + + chained_irq_exit(chip, desc); +} + +static u32 plda_hwirq_to_mask(int hwirq) +{ + u32 mask; + + /* hwirq 23 - 0 are the same with register */ + if (hwirq < EVENT_PM_MSI_INT_INTX) + mask = BIT(hwirq); + else if (hwirq == EVENT_PM_MSI_INT_INTX) + mask = PM_MSI_INT_INTX_MASK; + else + mask = BIT(hwirq + PCI_NUM_INTX - 1); + + return mask; +} + +static void plda_ack_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + + writel_relaxed(plda_hwirq_to_mask(data->hwirq), + port->bridge_addr + ISTATUS_LOCAL); +} + +static void plda_mask_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + u32 mask, val; + + mask = plda_hwirq_to_mask(data->hwirq); + + raw_spin_lock(&port->lock); + val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); + val &= ~mask; + writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); + raw_spin_unlock(&port->lock); +} + +static void plda_unmask_event_irq(struct irq_data *data) +{ + struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data); + u32 mask, val; + + mask = plda_hwirq_to_mask(data->hwirq); + + raw_spin_lock(&port->lock); + val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); + val |= mask; + writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); + raw_spin_unlock(&port->lock); +} + +static struct irq_chip plda_event_irq_chip = { + .name = "PLDA PCIe EVENT", + .irq_ack = plda_ack_event_irq, + .irq_mask = plda_mask_event_irq, + .irq_unmask = plda_unmask_event_irq, +}; + +static const struct plda_event_ops plda_event_ops = { + .get_events = plda_get_events, +}; + +static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct plda_pcie_rp *port = (void *)domain->host_data; + + irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops plda_event_domain_ops = { + .map = plda_pcie_event_map, +}; + +static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port) +{ + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "failed to find PCIe Intc node\n"); + return -EINVAL; + } + + port->event_domain = 
irq_domain_add_linear(pcie_intc_node, + port->num_events, + &plda_event_domain_ops, + port); + if (!port->event_domain) { + dev_err(dev, "failed to get event domain\n"); + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); + + port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, port); + if (!port->intx_domain) { + dev_err(dev, "failed to get an INTx IRQ domain\n"); + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); + + of_node_put(pcie_intc_node); + raw_spin_lock_init(&port->lock); + + return plda_allocate_msi_domains(port); +} + +int plda_init_interrupts(struct platform_device *pdev, + struct plda_pcie_rp *port, + const struct plda_event *event) +{ + struct device *dev = &pdev->dev; + int event_irq, ret; + u32 i; + + if (!port->event_ops) + port->event_ops = &plda_event_ops; + + if (!port->event_irq_chip) + port->event_irq_chip = &plda_event_irq_chip; + + ret = plda_pcie_init_irq_domains(port); + if (ret) { + dev_err(dev, "failed creating IRQ domains\n"); + return ret; + } + + port->irq = platform_get_irq(pdev, 0); + if (port->irq < 0) + return -ENODEV; + + for_each_set_bit(i, &port->events_bitmap, port->num_events) { + event_irq = irq_create_mapping(port->event_domain, i); + if (!event_irq) { + dev_err(dev, "failed to map hwirq %d\n", i); + return -ENXIO; + } + + if (event->request_event_irq) + ret = event->request_event_irq(port, event_irq, i); + else + ret = devm_request_irq(dev, event_irq, + plda_event_handler, + 0, NULL, port); + + if (ret) { + dev_err(dev, "failed to request IRQ %d\n", event_irq); + return ret; + } + } + + port->intx_irq = irq_create_mapping(port->event_domain, + event->intx_event); + if (!port->intx_irq) { + dev_err(dev, "failed to map INTx interrupt\n"); + return -ENXIO; + } + + /* Plug the INTx chained handler */ + irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port); + + port->msi_irq = irq_create_mapping(port->event_domain, + event->msi_event); + if (!port->msi_irq) + return -ENXIO; + + /* Plug the MSI chained handler */ + irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port); + + /* Plug the main event chained handler */ + irq_set_chained_handler_and_data(port->irq, plda_handle_event, port); + + return 0; +} +EXPORT_SYMBOL_GPL(plda_init_interrupts); + +void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, + phys_addr_t axi_addr, phys_addr_t pci_addr, + size_t size) +{ + u32 atr_sz = ilog2(size) - 1; + u32 val; + + if (index == 0) + val = PCIE_CONFIG_INTERFACE; + else + val = PCIE_TX_RX_INTERFACE; + + writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + + ATR0_AXI4_SLV0_TRSL_PARAM); + + val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) | + ATR_IMPL_ENABLE; + writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + + ATR0_AXI4_SLV0_SRCADDR_PARAM); + + val = upper_32_bits(axi_addr); + writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + + ATR0_AXI4_SLV0_SRC_ADDR); + + val = lower_32_bits(pci_addr); + writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + + ATR0_AXI4_SLV0_TRSL_ADDR_LSB); + + val = upper_32_bits(pci_addr); + writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + + ATR0_AXI4_SLV0_TRSL_ADDR_UDW); + + val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); + val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT); + writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); + 
writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR); +} +EXPORT_SYMBOL_GPL(plda_pcie_setup_window); + +int plda_pcie_setup_iomems(struct pci_host_bridge *bridge, + struct plda_pcie_rp *port) +{ + void __iomem *bridge_base_addr = port->bridge_addr; + struct resource_entry *entry; + u64 pci_addr; + u32 index = 1; + + resource_list_for_each_entry(entry, &bridge->windows) { + if (resource_type(entry->res) == IORESOURCE_MEM) { + pci_addr = entry->res->start - entry->offset; + plda_pcie_setup_window(bridge_base_addr, index, + entry->res->start, pci_addr, + resource_size(entry->res)); + index++; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems); + +static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie) +{ + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); + irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL); + irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL); + + irq_domain_remove(pcie->msi.msi_domain); + irq_domain_remove(pcie->msi.dev_domain); + + irq_domain_remove(pcie->intx_domain); + irq_domain_remove(pcie->event_domain); +} + +int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops, + const struct plda_event *plda_event) +{ + struct device *dev = port->dev; + struct pci_host_bridge *bridge; + struct platform_device *pdev = to_platform_device(dev); + struct resource *cfg_res; + int ret; + + pdev = to_platform_device(dev); + + port->bridge_addr = + devm_platform_ioremap_resource_byname(pdev, "apb"); + + if (IS_ERR(port->bridge_addr)) + return dev_err_probe(dev, PTR_ERR(port->bridge_addr), + "failed to map reg memory\n"); + + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + if (!cfg_res) + return dev_err_probe(dev, -ENODEV, + "failed to get config memory\n"); + + port->config_base = devm_ioremap_resource(dev, cfg_res); + if (IS_ERR(port->config_base)) + return dev_err_probe(dev, PTR_ERR(port->config_base), + "failed to map config memory\n"); + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return dev_err_probe(dev, -ENOMEM, + "failed to alloc bridge\n"); + + if (port->host_ops && port->host_ops->host_init) { + ret = port->host_ops->host_init(port); + if (ret) + return ret; + } + + port->bridge = bridge; + plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0, + resource_size(cfg_res)); + plda_pcie_setup_iomems(bridge, port); + plda_set_default_msi(&port->msi); + ret = plda_init_interrupts(pdev, port, plda_event); + if (ret) + goto err_host; + + /* Set default bus ops */ + bridge->ops = ops; + bridge->sysdata = port; + + ret = pci_host_probe(bridge); + if (ret < 0) { + dev_err_probe(dev, ret, "failed to probe pci host\n"); + goto err_probe; + } + + return ret; + +err_probe: + plda_pcie_irq_domain_deinit(port); +err_host: + if (port->host_ops && port->host_ops->host_deinit) + port->host_ops->host_deinit(port); + + return ret; +} +EXPORT_SYMBOL_GPL(plda_pcie_host_init); + +void plda_pcie_host_deinit(struct plda_pcie_rp *port) +{ + pci_stop_root_bus(port->bridge->bus); + pci_remove_root_bus(port->bridge->bus); + + plda_pcie_irq_domain_deinit(port); + + if (port->host_ops && port->host_ops->host_deinit) + port->host_ops->host_deinit(port); +} +EXPORT_SYMBOL_GPL(plda_pcie_host_deinit); diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h new file mode 100644 index 000000000000..0e7dc0d8e5ba --- /dev/null +++ b/drivers/pci/controller/plda/pcie-plda.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PLDA PCIe host 
controller driver + */ + +#ifndef _PCIE_PLDA_H +#define _PCIE_PLDA_H + +/* Number of MSI IRQs */ +#define PLDA_MAX_NUM_MSI_IRQS 32 + +/* PCIe Bridge Phy Regs */ +#define GEN_SETTINGS 0x80 +#define RP_ENABLE 1 +#define PCIE_PCI_IDS_DW1 0x9c +#define IDS_CLASS_CODE_SHIFT 16 +#define REVISION_ID_MASK GENMASK(7, 0) +#define CLASS_CODE_ID_MASK GENMASK(31, 8) +#define PCIE_PCI_IRQ_DW0 0xa8 +#define MSIX_CAP_MASK BIT(31) +#define NUM_MSI_MSGS_MASK GENMASK(6, 4) +#define NUM_MSI_MSGS_SHIFT 4 +#define PCI_MISC 0xb4 +#define PHY_FUNCTION_DIS BIT(15) +#define PCIE_WINROM 0xfc +#define PREF_MEM_WIN_64_SUPPORT BIT(3) + +#define IMASK_LOCAL 0x180 +#define DMA_END_ENGINE_0_MASK 0x00000000u +#define DMA_END_ENGINE_0_SHIFT 0 +#define DMA_END_ENGINE_1_MASK 0x00000000u +#define DMA_END_ENGINE_1_SHIFT 1 +#define DMA_ERROR_ENGINE_0_MASK 0x00000100u +#define DMA_ERROR_ENGINE_0_SHIFT 8 +#define DMA_ERROR_ENGINE_1_MASK 0x00000200u +#define DMA_ERROR_ENGINE_1_SHIFT 9 +#define A_ATR_EVT_POST_ERR_MASK 0x00010000u +#define A_ATR_EVT_POST_ERR_SHIFT 16 +#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u +#define A_ATR_EVT_FETCH_ERR_SHIFT 17 +#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u +#define A_ATR_EVT_DISCARD_ERR_SHIFT 18 +#define A_ATR_EVT_DOORBELL_MASK 0x00000000u +#define A_ATR_EVT_DOORBELL_SHIFT 19 +#define P_ATR_EVT_POST_ERR_MASK 0x00100000u +#define P_ATR_EVT_POST_ERR_SHIFT 20 +#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u +#define P_ATR_EVT_FETCH_ERR_SHIFT 21 +#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u +#define P_ATR_EVT_DISCARD_ERR_SHIFT 22 +#define P_ATR_EVT_DOORBELL_MASK 0x00000000u +#define P_ATR_EVT_DOORBELL_SHIFT 23 +#define PM_MSI_INT_INTA_MASK 0x01000000u +#define PM_MSI_INT_INTA_SHIFT 24 +#define PM_MSI_INT_INTB_MASK 0x02000000u +#define PM_MSI_INT_INTB_SHIFT 25 +#define PM_MSI_INT_INTC_MASK 0x04000000u +#define PM_MSI_INT_INTC_SHIFT 26 +#define PM_MSI_INT_INTD_MASK 0x08000000u +#define PM_MSI_INT_INTD_SHIFT 27 +#define PM_MSI_INT_INTX_MASK 0x0f000000u +#define PM_MSI_INT_INTX_SHIFT 24 +#define PM_MSI_INT_MSI_MASK 0x10000000u +#define PM_MSI_INT_MSI_SHIFT 28 +#define PM_MSI_INT_AER_EVT_MASK 0x20000000u +#define PM_MSI_INT_AER_EVT_SHIFT 29 +#define PM_MSI_INT_EVENTS_MASK 0x40000000u +#define PM_MSI_INT_EVENTS_SHIFT 30 +#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u +#define PM_MSI_INT_SYS_ERR_SHIFT 31 +#define SYS_AND_MSI_MASK GENMASK(31, 28) +#define NUM_LOCAL_EVENTS 15 +#define ISTATUS_LOCAL 0x184 +#define IMASK_HOST 0x188 +#define ISTATUS_HOST 0x18c +#define IMSI_ADDR 0x190 +#define ISTATUS_MSI 0x194 +#define PMSG_SUPPORT_RX 0x3f0 +#define PMSG_LTR_SUPPORT BIT(2) + +/* PCIe Master table init defines */ +#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u +#define ATR0_PCIE_ATR_SIZE 0x25 +#define ATR0_PCIE_ATR_SIZE_SHIFT 1 +#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u +#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u +#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu +#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u + +/* PCIe AXI slave table init defines */ +#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u +#define ATR_SIZE_SHIFT 1 +#define ATR_IMPL_ENABLE 1 +#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u +#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u +#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu +#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u +#define PCIE_TX_RX_INTERFACE 0x00000000u +#define PCIE_CONFIG_INTERFACE 0x00000001u + +#define CONFIG_SPACE_ADDR_OFFSET 0x1000u + +#define ATR_ENTRY_SIZE 32 + +enum plda_int_event { + PLDA_AXI_POST_ERR, + PLDA_AXI_FETCH_ERR, + PLDA_AXI_DISCARD_ERR, + PLDA_AXI_DOORBELL, + PLDA_PCIE_POST_ERR, + PLDA_PCIE_FETCH_ERR, + 
PLDA_PCIE_DISCARD_ERR, + PLDA_PCIE_DOORBELL, + PLDA_INTX, + PLDA_MSI, + PLDA_AER_EVENT, + PLDA_MISC_EVENTS, + PLDA_SYS_ERR, + PLDA_INT_EVENT_NUM +}; + +#define PLDA_NUM_DMA_EVENTS 16 + +#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX) +#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI) +#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM) + +/* + * PLDA interrupt register + * + * 31 27 23 15 7 0 + * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+ + * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end | + * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+ + * event bit + * 0-7 (0-7) DMA interrupt end : reserved for vendor implement + * 8-15 (8-15) DMA error : reserved for vendor implement + * 16 (16) AXI post error (PLDA_AXI_POST_ERR) + * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR) + * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR) + * 19 (19) AXI doorbell (PLDA_PCIE_DOORBELL) + * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR) + * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR) + * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR) + * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL) + * 24 (27-24) INTx interruts (PLDA_INTX) + * 25 (28): MSI interrupt (PLDA_MSI) + * 26 (29): AER event (PLDA_AER_EVENT) + * 27 (30): PM/LTR/Hotplug (PLDA_MISC_EVENTS) + * 28 (31): System error (PLDA_SYS_ERR) + */ + +struct plda_pcie_rp; + +struct plda_event_ops { + u32 (*get_events)(struct plda_pcie_rp *pcie); +}; + +struct plda_pcie_host_ops { + int (*host_init)(struct plda_pcie_rp *pcie); + void (*host_deinit)(struct plda_pcie_rp *pcie); +}; + +struct plda_msi { + struct mutex lock; /* Protect used bitmap */ + struct irq_domain *msi_domain; + struct irq_domain *dev_domain; + u32 num_vectors; + u64 vector_phy; + DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS); +}; + +struct plda_pcie_rp { + struct device *dev; + struct pci_host_bridge *bridge; + struct irq_domain *intx_domain; + struct irq_domain *event_domain; + raw_spinlock_t lock; + struct plda_msi msi; + const struct plda_event_ops *event_ops; + const struct irq_chip *event_irq_chip; + const struct plda_pcie_host_ops *host_ops; + void __iomem *bridge_addr; + void __iomem *config_base; + unsigned long events_bitmap; + int irq; + int msi_irq; + int intx_irq; + int num_events; +}; + +struct plda_event { + int (*request_event_irq)(struct plda_pcie_rp *pcie, + int event_irq, int event); + int intx_event; + int msi_event; +}; + +void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); +int plda_init_interrupts(struct platform_device *pdev, + struct plda_pcie_rp *port, + const struct plda_event *event); +void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, + phys_addr_t axi_addr, phys_addr_t pci_addr, + size_t size); +int plda_pcie_setup_iomems(struct pci_host_bridge *bridge, + struct plda_pcie_rp *port); +int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops, + const struct plda_event *plda_event); +void plda_pcie_host_deinit(struct plda_pcie_rp *pcie); + +static inline void plda_set_default_msi(struct plda_msi *msi) +{ + msi->vector_phy = IMSI_ADDR; + msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS; +} + +static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda) +{ + u32 value; + + value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS); + value |= RP_ENABLE; + writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS); +} + +static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda) +{ + u32 value; + + /* set class 
code and reserve revision id */ + value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1); + value &= REVISION_ID_MASK; + value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT); + writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1); +} + +static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda) +{ + u32 value; + + value = readl_relaxed(plda->bridge_addr + PCIE_WINROM); + value |= PREF_MEM_WIN_64_SUPPORT; + writel_relaxed(value, plda->bridge_addr + PCIE_WINROM); +} + +static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda) +{ + u32 value; + + value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX); + value &= ~PMSG_LTR_SUPPORT; + writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX); +} + +static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda) +{ + u32 value; + + value = readl_relaxed(plda->bridge_addr + PCI_MISC); + value |= PHY_FUNCTION_DIS; + writel_relaxed(value, plda->bridge_addr + PCI_MISC); +} + +static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val) +{ + void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET; + + writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0); + writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1); +} +#endif /* _PCIE_PLDA_H */ diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c new file mode 100644 index 000000000000..c9933ecf6833 --- /dev/null +++ b/drivers/pci/controller/plda/pcie-starfive.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for StarFive JH7110 Soc. + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include "../../pci.h" + +#include "pcie-plda.h" + +#define PCIE_FUNC_NUM 4 + +/* system control */ +#define STG_SYSCON_PCIE0_BASE 0x48 +#define STG_SYSCON_PCIE1_BASE 0x1f8 + +#define STG_SYSCON_AR_OFFSET 0x78 +#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8) +#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x) +#define STG_SYSCON_AW_OFFSET 0x7c +#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0) +#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x) +#define STG_SYSCON_CLKREQ BIT(22) +#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18) +#define STG_SYSCON_RP_NEP_OFFSET 0xe8 +#define STG_SYSCON_K_RP_NEP BIT(8) +#define STG_SYSCON_LNKSTA_OFFSET 0x170 +#define DATA_LINK_ACTIVE BIT(5) + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +struct starfive_jh7110_pcie { + struct plda_pcie_rp plda; + struct reset_control *resets; + struct clk_bulk_data *clks; + struct regmap *reg_syscon; + struct gpio_desc *power_gpio; + struct gpio_desc *reset_gpio; + struct phy *phy; + + unsigned int stg_pcie_base; + int num_clks; +}; + +/* + * JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory + * space. 
PCIe read and write requests targeting BAR0/1 are routed to so called + * 'Bridge Configuration space' in PLDA IP datasheet, which contains the bridge + * internal registers, such as interrupt, DMA and ATU registers... + * JH7110 can access the Bridge Configuration space by local bus, and don`t + * want the bridge internal registers accessed by the DMA from EP devices. + * Thus, they are unimplemented and should be hidden here. + */ +static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, + int offset) +{ + if (pci_is_root_bus(bus) && !devfn && + (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1)) + return true; + + return false; +} + +static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (starfive_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_SUCCESSFUL; + + return pci_generic_config_write(bus, devfn, where, size, value); +} + +static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (starfive_pcie_hide_rc_bar(bus, devfn, where)) { + *value = 0; + return PCIBIOS_SUCCESSFUL; + } + + return pci_generic_config_read(bus, devfn, where, size, value); +} + +static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie, + struct device *dev) +{ + int domain_nr; + + pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); + if (pcie->num_clks < 0) + return dev_err_probe(dev, pcie->num_clks, + "failed to get pcie clocks\n"); + + pcie->resets = devm_reset_control_array_get_exclusive(dev); + if (IS_ERR(pcie->resets)) + return dev_err_probe(dev, PTR_ERR(pcie->resets), + "failed to get pcie resets"); + + pcie->reg_syscon = + syscon_regmap_lookup_by_phandle(dev->of_node, + "starfive,stg-syscon"); + + if (IS_ERR(pcie->reg_syscon)) + return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon), + "failed to parse starfive,stg-syscon\n"); + + pcie->phy = devm_phy_optional_get(dev, NULL); + if (IS_ERR(pcie->phy)) + return dev_err_probe(dev, PTR_ERR(pcie->phy), + "failed to get pcie phy\n"); + + /* + * The PCIe domain numbers are set to be static in JH7110 DTS. + * As the STG system controller defines different bases in PCIe RP0 & + * RP1, we use them to identify which controller is doing the hardware + * initialization. 
+ */ + domain_nr = of_get_pci_domain_nr(dev->of_node); + + if (domain_nr < 0 || domain_nr > 1) + return dev_err_probe(dev, -ENODEV, + "failed to get valid pcie domain\n"); + + if (domain_nr == 0) + pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE; + else + pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE; + + pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst", + GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio), + "failed to get perst-gpio\n"); + + pcie->power_gpio = devm_gpiod_get_optional(dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(pcie->power_gpio)) + return dev_err_probe(dev, PTR_ERR(pcie->power_gpio), + "failed to get power-gpio\n"); + + return 0; +} + +static struct pci_ops starfive_pcie_ops = { + .map_bus = plda_pcie_map_bus, + .read = starfive_pcie_config_read, + .write = starfive_pcie_config_write, +}; + +static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie) +{ + struct device *dev = pcie->plda.dev; + int ret; + + ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); + if (ret) + return dev_err_probe(dev, ret, "failed to enable clocks\n"); + + ret = reset_control_deassert(pcie->resets); + if (ret) { + clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); + dev_err_probe(dev, ret, "failed to deassert resets\n"); + } + + return ret; +} + +static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie) +{ + reset_control_assert(pcie->resets); + clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); +} + +static bool starfive_pcie_link_up(struct plda_pcie_rp *plda) +{ + struct starfive_jh7110_pcie *pcie = + container_of(plda, struct starfive_jh7110_pcie, plda); + int ret; + u32 stg_reg_val; + + ret = regmap_read(pcie->reg_syscon, + pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET, + &stg_reg_val); + if (ret) { + dev_err(pcie->plda.dev, "failed to read link status\n"); + return false; + } + + return !!(stg_reg_val & DATA_LINK_ACTIVE); +} + +static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie) +{ + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (starfive_pcie_link_up(&pcie->plda)) { + dev_info(pcie->plda.dev, "port link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + return -ETIMEDOUT; +} + +static int starfive_pcie_enable_phy(struct device *dev, + struct starfive_jh7110_pcie *pcie) +{ + int ret; + + if (!pcie->phy) + return 0; + + ret = phy_init(pcie->phy); + if (ret) + return dev_err_probe(dev, ret, + "failed to initialize pcie phy\n"); + + ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE); + if (ret) { + dev_err_probe(dev, ret, "failed to set pcie mode\n"); + goto err_phy_on; + } + + ret = phy_power_on(pcie->phy); + if (ret) { + dev_err_probe(dev, ret, "failed to power on pcie phy\n"); + goto err_phy_on; + } + + return 0; + +err_phy_on: + phy_exit(pcie->phy); + return ret; +} + +static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie) +{ + phy_power_off(pcie->phy); + phy_exit(pcie->phy); +} + +static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda) +{ + struct starfive_jh7110_pcie *pcie = + container_of(plda, struct starfive_jh7110_pcie, plda); + + starfive_pcie_clk_rst_deinit(pcie); + if (pcie->power_gpio) + gpiod_set_value_cansleep(pcie->power_gpio, 0); + starfive_pcie_disable_phy(pcie); +} + +static int starfive_pcie_host_init(struct plda_pcie_rp *plda) +{ + struct starfive_jh7110_pcie *pcie = + container_of(plda, struct 
starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCI standard configuration identification settings */
+ plda_pcie_set_standard_class(plda);
+
+ /*
+ * LTR message reception is enabled by default via the "PCIe Message
+ * Reception" register, but the forwarding ID and address are left
+ * uninitialized. If LTR message forwarding is not disabled here, or a
+ * valid forwarding address is not configured, the kernel will get
+ * stuck. As a workaround, disable LTR message forwarding before using
+ * this feature.
+ */
+ plda_pcie_disable_ltr(plda);
+
+ /*
+ * Enable 64-bit addressing for the prefetchable memory window in the
+ * JH7110. The 64-bit prefetchable address translations configured in
+ * the ATU only take effect once the register setting below is enabled.
+ */
+ plda_pcie_set_pref_win_64bit(plda);
+
+ /*
+ * Ensure that PERST# has been asserted for at least 100 ms; the sleep
+ * value is T_PVPERL from the PCIe CEM spec r2.0 (Table 2-4).
+ */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * With a Downstream Port (<=5GT/s), software must wait a minimum of
+ * 100 ms following exit from a conventional reset before sending a
+ * configuration request to the device.
+ */ + msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS); + + if (starfive_pcie_host_wait_for_link(pcie)) + dev_info(dev, "port link down\n"); + + return 0; +} + +static const struct plda_pcie_host_ops sf_host_ops = { + .host_init = starfive_pcie_host_init, + .host_deinit = starfive_pcie_host_deinit, +}; + +static const struct plda_event stf_pcie_event = { + .intx_event = EVENT_PM_MSI_INT_INTX, + .msi_event = EVENT_PM_MSI_INT_MSI +}; + +static int starfive_pcie_probe(struct platform_device *pdev) +{ + struct starfive_jh7110_pcie *pcie; + struct device *dev = &pdev->dev; + struct plda_pcie_rp *plda; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + plda = &pcie->plda; + plda->dev = dev; + + ret = starfive_pcie_parse_dt(pcie, dev); + if (ret) + return ret; + + plda->host_ops = &sf_host_ops; + plda->num_events = PLDA_MAX_EVENT_NUM; + /* mask doorbell event */ + plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0) + & ~BIT(PLDA_AXI_DOORBELL) + & ~BIT(PLDA_PCIE_DOORBELL); + plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS; + ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops, + &stf_pcie_event); + if (ret) + return ret; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + platform_set_drvdata(pdev, pcie); + + return 0; +} + +static void starfive_pcie_remove(struct platform_device *pdev) +{ + struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + plda_pcie_host_deinit(&pcie->plda); + platform_set_drvdata(pdev, NULL); +} + +static int starfive_pcie_suspend_noirq(struct device *dev) +{ + struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); + starfive_pcie_disable_phy(pcie); + + return 0; +} + +static int starfive_pcie_resume_noirq(struct device *dev) +{ + struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev); + int ret; + + ret = starfive_pcie_enable_phy(dev, pcie); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); + if (ret) { + dev_err(dev, "failed to enable clocks\n"); + starfive_pcie_disable_phy(pcie); + return ret; + } + + return 0; +} + +static const struct dev_pm_ops starfive_pcie_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq, + starfive_pcie_resume_noirq) +}; + +static const struct of_device_id starfive_pcie_of_match[] = { + { .compatible = "starfive,jh7110-pcie", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, starfive_pcie_of_match); + +static struct platform_driver starfive_pcie_driver = { + .driver = { + .name = "pcie-starfive", + .of_match_table = of_match_ptr(starfive_pcie_of_match), + .pm = pm_sleep_ptr(&starfive_pcie_pm_ops), + }, + .probe = starfive_pcie_probe, + .remove_new = starfive_pcie_remove, +}; +module_platform_driver(starfive_pcie_driver); + +MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver"); +MODULE_LICENSE("GPL v2"); |
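
For readers who want to reuse the shared pcie-plda-host library this series introduces, below is a minimal, hypothetical platform-driver skeleton. It relies only on the API visible in this diff (plda_pcie_host_init(), plda_pcie_host_deinit(), plda_pcie_map_bus(), struct plda_pcie_host_ops, struct plda_event and the PLDA_*/EVENT_PM_MSI_* constants) and mirrors the pattern of starfive_pcie_probe() above: embed struct plda_pcie_rp, fill in host_ops and the event bitmap, then let the library map config space, set up INTx/MSI handling and scan the bus. All my_plda_* names, the "vendor,my-plda-pcie" compatible string and the "pcie-my-plda" driver name are placeholders, not part of the patch.

/* Hypothetical sketch only - not part of this patch. */
#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-plda.h"

struct my_plda_pcie {
        struct plda_pcie_rp plda;       /* embed the generic PLDA root port state */
};

static int my_plda_host_init(struct plda_pcie_rp *plda)
{
        /* Platform-specific clock, reset and PHY bring-up would go here. */
        return 0;
}

static void my_plda_host_deinit(struct plda_pcie_rp *plda)
{
        /* Undo whatever my_plda_host_init() set up. */
}

static const struct plda_pcie_host_ops my_plda_host_ops = {
        .host_init = my_plda_host_init,
        .host_deinit = my_plda_host_deinit,
};

/* Same interrupt event routing as the StarFive driver above. */
static const struct plda_event my_plda_event = {
        .intx_event = EVENT_PM_MSI_INT_INTX,
        .msi_event = EVENT_PM_MSI_INT_MSI,
};

static struct pci_ops my_plda_pci_ops = {
        .map_bus = plda_pcie_map_bus,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
};

static int my_plda_probe(struct platform_device *pdev)
{
        struct my_plda_pcie *pcie;

        pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
        if (!pcie)
                return -ENOMEM;

        pcie->plda.dev = &pdev->dev;
        pcie->plda.host_ops = &my_plda_host_ops;
        pcie->plda.num_events = PLDA_MAX_EVENT_NUM;
        /* Handle all local interrupt events above the DMA event range. */
        pcie->plda.events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0) <<
                                   PLDA_NUM_DMA_EVENTS;

        platform_set_drvdata(pdev, pcie);

        /* Maps config space, sets up INTx/MSI handling and scans the bus. */
        return plda_pcie_host_init(&pcie->plda, &my_plda_pci_ops,
                                   &my_plda_event);
}

static void my_plda_remove(struct platform_device *pdev)
{
        struct my_plda_pcie *pcie = platform_get_drvdata(pdev);

        plda_pcie_host_deinit(&pcie->plda);
}

static const struct of_device_id my_plda_of_match[] = {
        { .compatible = "vendor,my-plda-pcie", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_plda_of_match);

static struct platform_driver my_plda_driver = {
        .driver = {
                .name = "pcie-my-plda",
                .of_match_table = my_plda_of_match,
        },
        .probe = my_plda_probe,
        .remove_new = my_plda_remove,
};
module_platform_driver(my_plda_driver);

MODULE_DESCRIPTION("Hypothetical PLDA PCIe host driver skeleton");
MODULE_LICENSE("GPL");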