author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2022-08-02 10:06:12 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2022-08-02 10:06:12 -0700
commit     8bb5e7f4dcd9b9ef22a3ea25c9066a8a968f12dd (patch)
tree       0f1383880607a227142f9388a066959926233ff1 /drivers/bus
parent     2a96271fb66c499e4a89d76a89d3d01170c10bef (diff)
parent     7c744d00990ea999d27f306f6db5ccb61b1304b2 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.20 (or 6.0) merge window.
Diffstat (limited to 'drivers/bus')
33 files changed, 4126 insertions(+), 1015 deletions(-)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 3c68e174a113..7bfe998f3514 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -152,6 +152,17 @@ config QCOM_EBI2 Interface 2, which can be used to connect things like NAND Flash, SRAM, ethernet adapters, FPGAs and LCD displays. +config QCOM_SSC_BLOCK_BUS + bool "Qualcomm SSC Block Bus Init Driver" + depends on ARCH_QCOM + help + Say y here to enable support for initializing the bus that connects + the SSC block's internal bus to the cNoC (configurantion NoC) on + (some) qcom SoCs. + The SSC (Snapdragon Sensor Core) block contains a gpio controller, + i2c/spi/uart controllers, a hexagon core, and a clock controller + which provides clocks for the above. + config SUN50I_DE2_BUS bool "Allwinner A64 DE2 Bus Driver" default ARM64 diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 52c2f35a26a9..d90eed189a65 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o +obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_OF) += simple-pm-bus.o @@ -39,4 +40,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o # MHI -obj-$(CONFIG_MHI_BUS) += mhi/ +obj-y += mhi/ diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 183d5cc37d42..b0c3704777e9 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -536,7 +536,6 @@ static struct platform_driver brcmstb_gisb_arb_driver = { .name = "brcm-gisb-arb", .of_match_table = brcmstb_gisb_arb_of_match, .pm = &brcmstb_gisb_arb_pm_ops, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c index b25ff941e7c7..63b1b4a76671 100644 --- a/drivers/bus/bt1-apb.c +++ b/drivers/bus/bt1-apb.c @@ -175,10 +175,9 @@ static int bt1_apb_request_rst(struct bt1_apb *apb) int ret; apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst"); - if (IS_ERR(apb->prst)) { - dev_warn(apb->dev, "Couldn't get reset control line\n"); - return PTR_ERR(apb->prst); - } + if (IS_ERR(apb->prst)) + return dev_err_probe(apb->dev, PTR_ERR(apb->prst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(apb->prst); if (ret) @@ -199,10 +198,9 @@ static int bt1_apb_request_clk(struct bt1_apb *apb) int ret; apb->pclk = devm_clk_get(apb->dev, "pclk"); - if (IS_ERR(apb->pclk)) { - dev_err(apb->dev, "Couldn't get APB clock descriptor\n"); - return PTR_ERR(apb->pclk); - } + if (IS_ERR(apb->pclk)) + return dev_err_probe(apb->dev, PTR_ERR(apb->pclk), + "Couldn't get APB clock descriptor\n"); ret = clk_prepare_enable(apb->pclk); if (ret) { diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c index e7a6744acc7b..70e49a6e5374 100644 --- a/drivers/bus/bt1-axi.c +++ b/drivers/bus/bt1-axi.c @@ -135,10 +135,9 @@ static int bt1_axi_request_rst(struct bt1_axi *axi) int ret; axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst"); - if (IS_ERR(axi->arst)) { - dev_warn(axi->dev, "Couldn't get reset control line\n"); - return PTR_ERR(axi->arst); - } + if (IS_ERR(axi->arst)) + return dev_err_probe(axi->dev, PTR_ERR(axi->arst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(axi->arst); if (ret) @@ -159,10 +158,9 @@ static int bt1_axi_request_clk(struct bt1_axi *axi) int ret; 
axi->aclk = devm_clk_get(axi->dev, "aclk"); - if (IS_ERR(axi->aclk)) { - dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n"); - return PTR_ERR(axi->aclk); - } + if (IS_ERR(axi->aclk)) + return dev_err_probe(axi->dev, PTR_ERR(axi->aclk), + "Couldn't get AXI Interconnect clock\n"); ret = clk_prepare_enable(axi->aclk); if (ret) { diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 8fd4a356a86e..6143dbf31f31 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/acpi.h> #include <linux/iommu.h> +#include <linux/dma-map-ops.h> #include "fsl-mc-private.h" @@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev) { struct device *dma_dev = dev; struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); + struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); u32 input_id = mc_dev->icid; + int ret; while (dev_is_fsl_mc(dma_dev)) dma_dev = dma_dev->parent; if (dev_of_node(dma_dev)) - return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id); + ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id); + else + ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); - return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); + if (!ret && !mc_drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + + return ret; +} + +static void fsl_mc_dma_cleanup(struct device *dev) +{ + struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); + + if (!mc_drv->driver_managed_dma) + iommu_device_unuse_default_domain(dev); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, @@ -166,31 +185,14 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - char *driver_override, *old = mc_dev->driver_override; - char *cp; + int ret; if (WARN_ON(dev->bus != &fsl_mc_bus_type)) return -EINVAL; - if (count >= (PAGE_SIZE - 1)) - return -EINVAL; - - driver_override = kstrndup(buf, count, GFP_KERNEL); - if (!driver_override) - return -ENOMEM; - - cp = strchr(driver_override, '\n'); - if (cp) - *cp = '\0'; - - if (strlen(driver_override)) { - mc_dev->driver_override = driver_override; - } else { - kfree(driver_override); - mc_dev->driver_override = NULL; - } - - kfree(old); + ret = driver_set_override(dev, &mc_dev->driver_override, buf, count); + if (ret) + return ret; return count; } @@ -312,6 +314,7 @@ struct bus_type fsl_mc_bus_type = { .match = fsl_mc_bus_match, .uevent = fsl_mc_bus_uevent, .dma_configure = fsl_mc_dma_configure, + .dma_cleanup = fsl_mc_dma_cleanup, .dev_groups = fsl_mc_dev_groups, .bus_groups = fsl_mc_bus_groups, }; @@ -1236,14 +1239,14 @@ error_cleanup_mc_io: static int fsl_mc_bus_remove(struct platform_device *pdev) { struct fsl_mc *mc = platform_get_drvdata(pdev); + struct fsl_mc_io *mc_io; if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)) return -EINVAL; + mc_io = mc->root_mc_bus_dev->mc_io; fsl_mc_device_remove(mc->root_mc_bus_dev); - - fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); - mc->root_mc_bus_dev->mc_io = NULL; + fsl_destroy_mc_io(mc_io); bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb); diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c index 5e0e4393ce4d..0cfe859a4ac4 100644 --- a/drivers/bus/fsl-mc/fsl-mc-msi.c +++ b/drivers/bus/fsl-mc/fsl-mc-msi.c @@ -224,8 +224,12 @@ int 
fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count) if (error) return error; + msi_lock_descs(dev); if (msi_first_desc(dev, MSI_DESC_ALL)) - return -EINVAL; + error = -EINVAL; + msi_unlock_descs(dev); + if (error) + return error; /* * NOTE: Calling this function will trigger the invocation of the diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index bccb275b65ba..828c66bbaa67 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -64,6 +64,11 @@ struct cs_timing_state { struct cs_timing cs[MAX_CS_COUNT]; }; +struct weim_priv { + void __iomem *base; + struct cs_timing_state timing_state; +}; + static const struct of_device_id weim_id_table[] = { /* i.MX1/21 */ { .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, }, @@ -128,21 +133,26 @@ err: } /* Parse and set the timing for this device. */ -static int weim_timing_setup(struct device *dev, - struct device_node *np, void __iomem *base, - const struct imx_weim_devtype *devtype, - struct cs_timing_state *ts) +static int weim_timing_setup(struct device *dev, struct device_node *np, + const struct imx_weim_devtype *devtype) { u32 cs_idx, value[MAX_CS_REGS_COUNT]; int i, ret; int reg_idx, num_regs; struct cs_timing *cst; + struct weim_priv *priv; + struct cs_timing_state *ts; + void __iomem *base; if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT)) return -EINVAL; if (WARN_ON(devtype->cs_count > MAX_CS_COUNT)) return -EINVAL; + priv = dev_get_drvdata(dev); + base = priv->base; + ts = &priv->timing_state; + ret = of_property_read_u32_array(np, "fsl,weim-cs-timing", value, devtype->cs_regs_count); if (ret) @@ -189,14 +199,15 @@ static int weim_timing_setup(struct device *dev, return 0; } -static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) +static int weim_parse_dt(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(weim_id_table, &pdev->dev); const struct imx_weim_devtype *devtype = of_id->data; struct device_node *child; int ret, have_child = 0; - struct cs_timing_state ts = {}; + struct weim_priv *priv; + void __iomem *base; u32 reg; if (devtype == &imx50_weim_devtype) { @@ -205,6 +216,9 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) return ret; } + priv = dev_get_drvdata(&pdev->dev); + base = priv->base; + if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { if (devtype->wcr_bcm) { reg = readl(base + devtype->wcr_offset); @@ -229,7 +243,7 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) } for_each_available_child_of_node(pdev->dev.of_node, child) { - ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts); + ret = weim_timing_setup(&pdev->dev, child, devtype); if (ret) dev_warn(&pdev->dev, "%pOF set timing failed.\n", child); @@ -248,17 +262,25 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) static int weim_probe(struct platform_device *pdev) { + struct weim_priv *priv; struct resource *res; struct clk *clk; void __iomem *base; int ret; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + /* get the resource */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); + priv->base = base; + dev_set_drvdata(&pdev->dev, priv); + /* get the clock */ clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) @@ -269,7 +291,7 @@ static int weim_probe(struct platform_device *pdev) return ret; /* parse the 
device node */ - ret = weim_parse_dt(pdev, base); + ret = weim_parse_dt(pdev); if (ret) clk_disable_unprepare(clk); else @@ -278,6 +300,80 @@ static int weim_probe(struct platform_device *pdev) return ret; } +#if IS_ENABLED(CONFIG_OF_DYNAMIC) +static int of_weim_notify(struct notifier_block *nb, unsigned long action, + void *arg) +{ + const struct imx_weim_devtype *devtype; + struct of_reconfig_data *rd = arg; + const struct of_device_id *of_id; + struct platform_device *pdev; + int ret = NOTIFY_OK; + + switch (of_reconfig_get_state_change(action, rd)) { + case OF_RECONFIG_CHANGE_ADD: + of_id = of_match_node(weim_id_table, rd->dn->parent); + if (!of_id) + return NOTIFY_OK; /* not for us */ + + devtype = of_id->data; + + pdev = of_find_device_by_node(rd->dn->parent); + if (!pdev) { + pr_err("%s: could not find platform device for '%pOF'\n", + __func__, rd->dn->parent); + + return notifier_from_errno(-EINVAL); + } + + if (weim_timing_setup(&pdev->dev, rd->dn, devtype)) + dev_warn(&pdev->dev, + "Failed to setup timing for '%pOF'\n", rd->dn); + + if (!of_node_check_flag(rd->dn, OF_POPULATED)) { + if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) { + dev_err(&pdev->dev, + "Failed to create child device '%pOF'\n", + rd->dn); + ret = notifier_from_errno(-EINVAL); + } + } + + platform_device_put(pdev); + + break; + case OF_RECONFIG_CHANGE_REMOVE: + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; /* device already destroyed */ + + of_id = of_match_node(weim_id_table, rd->dn->parent); + if (!of_id) + return NOTIFY_OK; /* not for us */ + + pdev = of_find_device_by_node(rd->dn); + if (!pdev) { + pr_err("Could not find platform device for '%pOF'\n", + rd->dn); + + ret = notifier_from_errno(-EINVAL); + } else { + of_platform_device_destroy(&pdev->dev, NULL); + platform_device_put(pdev); + } + + break; + default: + break; + } + + return ret; +} + +static struct notifier_block weim_of_notifier = { + .notifier_call = of_weim_notify, +}; +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + static struct platform_driver weim_driver = { .driver = { .name = "imx-weim", @@ -285,7 +381,27 @@ static struct platform_driver weim_driver = { }, .probe = weim_probe, }; -module_platform_driver(weim_driver); + +static int __init weim_init(void) +{ +#if IS_ENABLED(CONFIG_OF_DYNAMIC) + WARN_ON(of_reconfig_notifier_register(&weim_of_notifier)); +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + + return platform_driver_register(&weim_driver); +} +module_init(weim_init); + +static void __exit weim_exit(void) +{ +#if IS_ENABLED(CONFIG_OF_DYNAMIC) + of_reconfig_notifier_unregister(&weim_of_notifier); +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + + return platform_driver_unregister(&weim_driver); + +} +module_exit(weim_exit); MODULE_AUTHOR("Freescale Semiconductor Inc."); MODULE_DESCRIPTION("i.MX EIM Controller Driver"); diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index da5cd0c9fc62..b39a11e6c624 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -2,30 +2,8 @@ # # MHI bus # -# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# Copyright (c) 2021, Linaro Ltd. # -config MHI_BUS - tristate "Modem Host Interface (MHI) bus" - help - Bus driver for MHI protocol. Modem Host Interface (MHI) is a - communication protocol used by the host processors to control - and communicate with modem devices over a high speed peripheral - bus or shared memory. 
- -config MHI_BUS_DEBUG - bool "Debugfs support for the MHI bus" - depends on MHI_BUS && DEBUG_FS - help - Enable debugfs support for use with the MHI transport. Allows - reading and/or modifying some values within the MHI controller - for debug and test purposes. - -config MHI_BUS_PCI_GENERIC - tristate "MHI PCI controller driver" - depends on MHI_BUS - depends on PCI - help - This driver provides MHI PCI controller driver for devices such as - Qualcomm SDX55 based PCIe modems. - +source "drivers/bus/mhi/host/Kconfig" +source "drivers/bus/mhi/ep/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 0a2d778d6fb4..46981331b38f 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,6 +1,5 @@ -# core layer -obj-y += core/ - -obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o -mhi_pci_generic-y += pci_generic.o +# Host MHI stack +obj-y += host/ +# Endpoint MHI stack +obj-y += ep/ diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h new file mode 100644 index 000000000000..f794b9c8049e --- /dev/null +++ b/drivers/bus/mhi/common.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ + +#ifndef _MHI_COMMON_H +#define _MHI_COMMON_H + +#include <linux/bitfield.h> +#include <linux/mhi.h> + +/* MHI registers */ +#define MHIREGLEN 0x00 +#define MHIVER 0x08 +#define MHICFG 0x10 +#define CHDBOFF 0x18 +#define ERDBOFF 0x20 +#define BHIOFF 0x28 +#define BHIEOFF 0x2c +#define DEBUGOFF 0x30 +#define MHICTRL 0x38 +#define MHISTATUS 0x48 +#define CCABAP_LOWER 0x58 +#define CCABAP_HIGHER 0x5c +#define ECABAP_LOWER 0x60 +#define ECABAP_HIGHER 0x64 +#define CRCBAP_LOWER 0x68 +#define CRCBAP_HIGHER 0x6c +#define CRDB_LOWER 0x70 +#define CRDB_HIGHER 0x74 +#define MHICTRLBASE_LOWER 0x80 +#define MHICTRLBASE_HIGHER 0x84 +#define MHICTRLLIMIT_LOWER 0x88 +#define MHICTRLLIMIT_HIGHER 0x8c +#define MHIDATABASE_LOWER 0x98 +#define MHIDATABASE_HIGHER 0x9c +#define MHIDATALIMIT_LOWER 0xa0 +#define MHIDATALIMIT_HIGHER 0xa4 + +/* MHI BHI registers */ +#define BHI_BHIVERSION_MINOR 0x00 +#define BHI_BHIVERSION_MAJOR 0x04 +#define BHI_IMGADDR_LOW 0x08 +#define BHI_IMGADDR_HIGH 0x0c +#define BHI_IMGSIZE 0x10 +#define BHI_RSVD1 0x14 +#define BHI_IMGTXDB 0x18 +#define BHI_RSVD2 0x1c +#define BHI_INTVEC 0x20 +#define BHI_RSVD3 0x24 +#define BHI_EXECENV 0x28 +#define BHI_STATUS 0x2c +#define BHI_ERRCODE 0x30 +#define BHI_ERRDBG1 0x34 +#define BHI_ERRDBG2 0x38 +#define BHI_ERRDBG3 0x3c +#define BHI_SERIALNU 0x40 +#define BHI_SBLANTIROLLVER 0x44 +#define BHI_NUMSEG 0x48 +#define BHI_MSMHWID(n) (0x4c + (0x4 * (n))) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) +#define BHI_RSVD5 0xc4 + +/* BHI register bits */ +#define BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHI_TXDB_SEQNUM_SHFT 0 +#define BHI_STATUS_MASK GENMASK(31, 30) +#define BHI_STATUS_ERROR 0x03 +#define BHI_STATUS_SUCCESS 0x02 +#define BHI_STATUS_RESET 0x00 + +/* MHI BHIE registers */ +#define BHIE_MSMSOCID_OFFS 0x00 +#define BHIE_TXVECADDR_LOW_OFFS 0x2c +#define BHIE_TXVECADDR_HIGH_OFFS 0x30 +#define BHIE_TXVECSIZE_OFFS 0x34 +#define BHIE_TXVECDB_OFFS 0x3c +#define BHIE_TXVECSTATUS_OFFS 0x44 +#define BHIE_RXVECADDR_LOW_OFFS 0x60 +#define BHIE_RXVECADDR_HIGH_OFFS 0x64 +#define BHIE_RXVECSIZE_OFFS 0x68 +#define BHIE_RXVECDB_OFFS 0x70 +#define BHIE_RXVECSTATUS_OFFS 0x78 + +/* BHIE register bits */ +#define BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_TXVECDB_SEQNUM_SHFT 0 +#define BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) +#define 
BHIE_TXVECSTATUS_SEQNUM_SHFT 0 +#define BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30) +#define BHIE_TXVECSTATUS_STATUS_SHFT 30 +#define BHIE_TXVECSTATUS_STATUS_RESET 0x00 +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02 +#define BHIE_TXVECSTATUS_STATUS_ERROR 0x03 +#define BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_RXVECDB_SEQNUM_SHFT 0 +#define BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT 0 +#define BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30) +#define BHIE_RXVECSTATUS_STATUS_SHFT 30 +#define BHIE_RXVECSTATUS_STATUS_RESET 0x00 +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02 +#define BHIE_RXVECSTATUS_STATUS_ERROR 0x03 + +/* MHI register bits */ +#define MHICFG_NHWER_MASK GENMASK(31, 24) +#define MHICFG_NER_MASK GENMASK(23, 16) +#define MHICFG_NHWCH_MASK GENMASK(15, 8) +#define MHICFG_NCH_MASK GENMASK(7, 0) +#define MHICTRL_MHISTATE_MASK GENMASK(15, 8) +#define MHICTRL_RESET_MASK BIT(1) +#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8) +#define MHISTATUS_SYSERR_MASK BIT(2) +#define MHISTATUS_READY_MASK BIT(0) + +/* Command Ring Element macros */ +/* No operation command */ +#define MHI_TRE_CMD_NOOP_PTR 0 +#define MHI_TRE_CMD_NOOP_DWORD0 0 +#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), MHI_CMD_NOP)) + +/* Channel reset command */ +#define MHI_TRE_CMD_RESET_PTR 0 +#define MHI_TRE_CMD_RESET_DWORD0 0 +#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_RESET_CHAN)) + +/* Channel stop command */ +#define MHI_TRE_CMD_STOP_PTR 0 +#define MHI_TRE_CMD_STOP_DWORD0 0 +#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_STOP_CHAN)) + +/* Channel start command */ +#define MHI_TRE_CMD_START_PTR 0 +#define MHI_TRE_CMD_START_DWORD0 0 +#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), \ + MHI_CMD_START_CHAN)) + +#define MHI_TRE_GET_DWORD(tre, word) le32_to_cpu((tre)->dword[(word)]) +#define MHI_TRE_GET_CMD_CHID(tre) FIELD_GET(GENMASK(31, 24), MHI_TRE_GET_DWORD(tre, 1)) +#define MHI_TRE_GET_CMD_TYPE(tre) FIELD_GET(GENMASK(23, 16), MHI_TRE_GET_DWORD(tre, 1)) + +/* Event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_TRE_EV_DWORD0(code, len) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code) | \ + FIELD_PREP(GENMASK(15, 0), len)) +#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \ + FIELD_PREP(GENMASK(23, 16), type)) +#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_LEN(tre) FIELD_GET(GENMASK(15, 0), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_CHID(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_TYPE(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_STATE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_EXECENV(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) MHI_TRE_GET_EV_PTR(tre) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 0))) +#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), 
(MHI_TRE_GET_DWORD(tre, 1))) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) + +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* Transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) +#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) +#define MHI_TRE_TYPE_TRANSFER 2 +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \ + MHI_TRE_TYPE_TRANSFER) | \ + FIELD_PREP(BIT(10), bei) | \ + FIELD_PREP(BIT(9), ieot) | \ + FIELD_PREP(BIT(8), ieob) | \ + FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) + +/* RSC transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) cpu_to_le32(cookie) +#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \ + MHI_PKT_TYPE_COALESCING)) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +/* Channel state */ +enum mhi_ch_state { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_ENABLED, + MHI_CH_STATE_RUNNING, + MHI_CH_STATE_SUSPENDED, + MHI_CH_STATE_STOP, + MHI_CH_STATE_ERROR, +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, +}; + +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +struct mhi_event_ctxt { + __le32 intmod; + __le32 ertype; + __le32 msivec; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed 
__aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctxt { + __le32 chcfg; + __le32 chtype; + __le32 erindex; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_ring_element { + __le64 ptr; + __le32 dword[2]; +}; + +static inline const char *mhi_state_str(enum mhi_state state) +{ + switch (state) { + case MHI_STATE_RESET: + return "RESET"; + case MHI_STATE_READY: + return "READY"; + case MHI_STATE_M0: + return "M0"; + case MHI_STATE_M1: + return "M1"; + case MHI_STATE_M2: + return "M2"; + case MHI_STATE_M3: + return "M3"; + case MHI_STATE_M3_FAST: + return "M3 FAST"; + case MHI_STATE_BHI: + return "BHI"; + case MHI_STATE_SYS_ERR: + return "SYS ERROR"; + default: + return "Unknown state"; + } +}; + +#endif /* _MHI_COMMON_H */ diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h deleted file mode 100644 index e2e10474a9d9..000000000000 --- a/drivers/bus/mhi/core/internal.h +++ /dev/null @@ -1,722 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. - * - */ - -#ifndef _MHI_INT_H -#define _MHI_INT_H - -#include <linux/mhi.h> - -extern struct bus_type mhi_bus_type; - -#define MHIREGLEN (0x0) -#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) -#define MHIREGLEN_MHIREGLEN_SHIFT (0) - -#define MHIVER (0x8) -#define MHIVER_MHIVER_MASK (0xFFFFFFFF) -#define MHIVER_MHIVER_SHIFT (0) - -#define MHICFG (0x10) -#define MHICFG_NHWER_MASK (0xFF000000) -#define MHICFG_NHWER_SHIFT (24) -#define MHICFG_NER_MASK (0xFF0000) -#define MHICFG_NER_SHIFT (16) -#define MHICFG_NHWCH_MASK (0xFF00) -#define MHICFG_NHWCH_SHIFT (8) -#define MHICFG_NCH_MASK (0xFF) -#define MHICFG_NCH_SHIFT (0) - -#define CHDBOFF (0x18) -#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) -#define CHDBOFF_CHDBOFF_SHIFT (0) - -#define ERDBOFF (0x20) -#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) -#define ERDBOFF_ERDBOFF_SHIFT (0) - -#define BHIOFF (0x28) -#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) -#define BHIOFF_BHIOFF_SHIFT (0) - -#define BHIEOFF (0x2C) -#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) -#define BHIEOFF_BHIEOFF_SHIFT (0) - -#define DEBUGOFF (0x30) -#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) -#define DEBUGOFF_DEBUGOFF_SHIFT (0) - -#define MHICTRL (0x38) -#define MHICTRL_MHISTATE_MASK (0x0000FF00) -#define MHICTRL_MHISTATE_SHIFT (8) -#define MHICTRL_RESET_MASK (0x2) -#define MHICTRL_RESET_SHIFT (1) - -#define MHISTATUS (0x48) -#define MHISTATUS_MHISTATE_MASK (0x0000FF00) -#define MHISTATUS_MHISTATE_SHIFT (8) -#define MHISTATUS_SYSERR_MASK (0x4) -#define MHISTATUS_SYSERR_SHIFT (2) -#define MHISTATUS_READY_MASK (0x1) -#define MHISTATUS_READY_SHIFT (0) - -#define CCABAP_LOWER (0x58) -#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) -#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) - -#define CCABAP_HIGHER (0x5C) -#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) -#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) - -#define ECABAP_LOWER (0x60) -#define ECABAP_LOWER_ECABAP_LOWER_MASK 
(0xFFFFFFFF) -#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) - -#define ECABAP_HIGHER (0x64) -#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) -#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) - -#define CRCBAP_LOWER (0x68) -#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) -#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) - -#define CRCBAP_HIGHER (0x6C) -#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) -#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) - -#define CRDB_LOWER (0x70) -#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) -#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) - -#define CRDB_HIGHER (0x74) -#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) -#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) - -#define MHICTRLBASE_LOWER (0x80) -#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) -#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) - -#define MHICTRLBASE_HIGHER (0x84) -#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) -#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) - -#define MHICTRLLIMIT_LOWER (0x88) -#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) -#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) - -#define MHICTRLLIMIT_HIGHER (0x8C) -#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) -#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) - -#define MHIDATABASE_LOWER (0x98) -#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) -#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) - -#define MHIDATABASE_HIGHER (0x9C) -#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) -#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) - -#define MHIDATALIMIT_LOWER (0xA0) -#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) -#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) - -#define MHIDATALIMIT_HIGHER (0xA4) -#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) -#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) - -/* Host request register */ -#define MHI_SOC_RESET_REQ_OFFSET (0xB0) -#define MHI_SOC_RESET_REQ BIT(0) - -/* MHI BHI offfsets */ -#define BHI_BHIVERSION_MINOR (0x00) -#define BHI_BHIVERSION_MAJOR (0x04) -#define BHI_IMGADDR_LOW (0x08) -#define BHI_IMGADDR_HIGH (0x0C) -#define BHI_IMGSIZE (0x10) -#define BHI_RSVD1 (0x14) -#define BHI_IMGTXDB (0x18) -#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHI_TXDB_SEQNUM_SHFT (0) -#define BHI_RSVD2 (0x1C) -#define BHI_INTVEC (0x20) -#define BHI_RSVD3 (0x24) -#define BHI_EXECENV (0x28) -#define BHI_STATUS (0x2C) -#define BHI_ERRCODE (0x30) -#define BHI_ERRDBG1 (0x34) -#define BHI_ERRDBG2 (0x38) -#define BHI_ERRDBG3 (0x3C) -#define BHI_SERIALNU (0x40) -#define BHI_SBLANTIROLLVER (0x44) -#define BHI_NUMSEG (0x48) -#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) -#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) -#define BHI_RSVD5 (0xC4) -#define BHI_STATUS_MASK (0xC0000000) -#define BHI_STATUS_SHIFT (30) -#define BHI_STATUS_ERROR (3) -#define BHI_STATUS_SUCCESS (2) -#define BHI_STATUS_RESET (0) - -/* MHI BHIE offsets */ -#define BHIE_MSMSOCID_OFFS (0x0000) -#define BHIE_TXVECADDR_LOW_OFFS (0x002C) -#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) -#define BHIE_TXVECSIZE_OFFS (0x0034) -#define BHIE_TXVECDB_OFFS (0x003C) -#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_TXVECDB_SEQNUM_SHFT (0) -#define BHIE_TXVECSTATUS_OFFS (0x0044) -#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) -#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) -#define BHIE_TXVECSTATUS_STATUS_SHFT (30) 
-#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) -#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) -#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) -#define BHIE_RXVECADDR_LOW_OFFS (0x0060) -#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) -#define BHIE_RXVECSIZE_OFFS (0x0068) -#define BHIE_RXVECDB_OFFS (0x0070) -#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_RXVECDB_SEQNUM_SHFT (0) -#define BHIE_RXVECSTATUS_OFFS (0x0078) -#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) -#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) -#define BHIE_RXVECSTATUS_STATUS_SHFT (30) -#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) -#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) -#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) - -#define SOC_HW_VERSION_OFFS (0x224) -#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) -#define SOC_HW_VERSION_FAM_NUM_SHFT (28) -#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) -#define SOC_HW_VERSION_DEV_NUM_SHFT (16) -#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) -#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) -#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) -#define SOC_HW_VERSION_MINOR_VER_SHFT (0) - -#define EV_CTX_RESERVED_MASK GENMASK(7, 0) -#define EV_CTX_INTMODC_MASK GENMASK(15, 8) -#define EV_CTX_INTMODC_SHIFT 8 -#define EV_CTX_INTMODT_MASK GENMASK(31, 16) -#define EV_CTX_INTMODT_SHIFT 16 -struct mhi_event_ctxt { - __u32 intmod; - __u32 ertype; - __u32 msivec; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); -}; - -#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) -#define CHAN_CTX_CHSTATE_SHIFT 0 -#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) -#define CHAN_CTX_BRSTMODE_SHIFT 8 -#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) -#define CHAN_CTX_POLLCFG_SHIFT 10 -#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) -struct mhi_chan_ctxt { - __u32 chcfg; - __u32 chtype; - __u32 erindex; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); -}; - -struct mhi_cmd_ctxt { - __u32 reserved0; - __u32 reserved1; - __u32 reserved2; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); -}; - -struct mhi_ctxt { - struct mhi_event_ctxt *er_ctxt; - struct mhi_chan_ctxt *chan_ctxt; - struct mhi_cmd_ctxt *cmd_ctxt; - dma_addr_t er_ctxt_addr; - dma_addr_t chan_ctxt_addr; - dma_addr_t cmd_ctxt_addr; -}; - -struct mhi_tre { - u64 ptr; - u32 dword[2]; -}; - -struct bhi_vec_entry { - u64 dma_addr; - u64 size; -}; - -enum mhi_cmd_type { - MHI_CMD_NOP = 1, - MHI_CMD_RESET_CHAN = 16, - MHI_CMD_STOP_CHAN = 17, - MHI_CMD_START_CHAN = 18, -}; - -/* No operation command */ -#define MHI_TRE_CMD_NOOP_PTR (0) -#define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) - -/* Channel reset command */ -#define MHI_TRE_CMD_RESET_PTR (0) -#define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_RESET_CHAN << 16)) - -/* Channel stop command */ -#define MHI_TRE_CMD_STOP_PTR (0) -#define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_STOP_CHAN << 16)) - -/* Channel start command */ -#define MHI_TRE_CMD_START_PTR (0) -#define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_START_CHAN << 16)) - -#define MHI_TRE_GET_CMD_CHID(tre) 
(((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) - -/* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) - -/* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ - | (ieot << 9) | (ieob << 8) | chain) - -/* RSC transfer descriptor macros */ -#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) - -enum mhi_pkt_type { - MHI_PKT_TYPE_INVALID = 0x0, - MHI_PKT_TYPE_NOOP_CMD = 0x1, - MHI_PKT_TYPE_TRANSFER = 0x2, - MHI_PKT_TYPE_COALESCING = 0x8, - MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, - MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, - MHI_PKT_TYPE_START_CHAN_CMD = 0x12, - MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, - MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, - MHI_PKT_TYPE_TX_EVENT = 0x22, - MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, - MHI_PKT_TYPE_EE_EVENT = 0x40, - MHI_PKT_TYPE_TSYNC_EVENT = 0x48, - MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, - MHI_PKT_TYPE_STALE_EVENT, /* internal event */ -}; - -/* MHI transfer completion events */ -enum mhi_ev_ccs { - MHI_EV_CC_INVALID = 0x0, - MHI_EV_CC_SUCCESS = 0x1, - MHI_EV_CC_EOT = 0x2, /* End of transfer event */ - MHI_EV_CC_OVERFLOW = 0x3, - MHI_EV_CC_EOB = 0x4, /* End of block event */ - MHI_EV_CC_OOB = 0x5, /* Out of block event */ - MHI_EV_CC_DB_MODE = 0x6, - MHI_EV_CC_UNDEFINED_ERR = 0x10, - MHI_EV_CC_BAD_TRE = 0x11, -}; - -enum mhi_ch_state { - MHI_CH_STATE_DISABLED = 0x0, - MHI_CH_STATE_ENABLED = 0x1, - MHI_CH_STATE_RUNNING = 0x2, - MHI_CH_STATE_SUSPENDED = 0x3, - MHI_CH_STATE_STOP = 0x4, - MHI_CH_STATE_ERROR = 0x5, -}; - -enum mhi_ch_state_type { - MHI_CH_STATE_TYPE_RESET, - MHI_CH_STATE_TYPE_STOP, - MHI_CH_STATE_TYPE_START, - MHI_CH_STATE_TYPE_MAX, -}; - -extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; -#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ - "INVALID_STATE" : \ - mhi_ch_state_type_str[(state)]) - -#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ - mode != MHI_DB_BRST_ENABLE) - -extern const char * const mhi_ee_str[MHI_EE_MAX]; -#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? 
\ - "INVALID_EE" : mhi_ee_str[ee]) - -#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ - ee == MHI_EE_EDL) -#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) -#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) -#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ - ee == MHI_EE_FP) - -enum dev_st_transition { - DEV_ST_TRANSITION_PBL, - DEV_ST_TRANSITION_READY, - DEV_ST_TRANSITION_SBL, - DEV_ST_TRANSITION_MISSION_MODE, - DEV_ST_TRANSITION_FP, - DEV_ST_TRANSITION_SYS_ERR, - DEV_ST_TRANSITION_DISABLE, - DEV_ST_TRANSITION_MAX, -}; - -extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; -#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ - "INVALID_STATE" : dev_state_tran_str[state]) - -extern const char * const mhi_state_str[MHI_STATE_MAX]; -#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ - !mhi_state_str[state]) ? \ - "INVALID_STATE" : mhi_state_str[state]) - -/* internal power states */ -enum mhi_pm_state { - MHI_PM_STATE_DISABLE, - MHI_PM_STATE_POR, - MHI_PM_STATE_M0, - MHI_PM_STATE_M2, - MHI_PM_STATE_M3_ENTER, - MHI_PM_STATE_M3, - MHI_PM_STATE_M3_EXIT, - MHI_PM_STATE_FW_DL_ERR, - MHI_PM_STATE_SYS_ERR_DETECT, - MHI_PM_STATE_SYS_ERR_PROCESS, - MHI_PM_STATE_SHUTDOWN_PROCESS, - MHI_PM_STATE_LD_ERR_FATAL_DETECT, - MHI_PM_STATE_MAX -}; - -#define MHI_PM_DISABLE BIT(0) -#define MHI_PM_POR BIT(1) -#define MHI_PM_M0 BIT(2) -#define MHI_PM_M2 BIT(3) -#define MHI_PM_M3_ENTER BIT(4) -#define MHI_PM_M3 BIT(5) -#define MHI_PM_M3_EXIT BIT(6) -/* firmware download failure state */ -#define MHI_PM_FW_DL_ERR BIT(7) -#define MHI_PM_SYS_ERR_DETECT BIT(8) -#define MHI_PM_SYS_ERR_PROCESS BIT(9) -#define MHI_PM_SHUTDOWN_PROCESS BIT(10) -/* link not accessible */ -#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) - -#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ - MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ - MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) -#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) -#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) -#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ - mhi_cntrl->db_access) -#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ - MHI_PM_M2 | MHI_PM_M3_EXIT)) -#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) -#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) -#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ - MHI_PM_IN_ERROR_STATE(pm_state)) -#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ - (MHI_PM_M3_ENTER | MHI_PM_M3)) - -#define NR_OF_CMD_RINGS 1 -#define CMD_EL_PER_RING 128 -#define PRIMARY_CMD_RING 0 -#define MHI_DEV_WAKE_DB 127 -#define MHI_MAX_MTU 0xffff -#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) - -enum mhi_er_type { - MHI_ER_TYPE_INVALID = 0x0, - MHI_ER_TYPE_VALID = 0x1, -}; - -struct db_cfg { - bool reset_req; - bool db_mode; - u32 pollcfg; - enum mhi_db_brst_mode brstmode; - dma_addr_t db_val; - void (*process_db)(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_cfg, void __iomem *io_addr, - dma_addr_t db_val); -}; - -struct mhi_pm_transitions { - enum mhi_pm_state from_state; - u32 to_states; -}; - -struct state_transition { - struct list_head node; - enum dev_st_transition state; -}; - -struct mhi_ring { - dma_addr_t dma_handle; - dma_addr_t 
iommu_base; - u64 *ctxt_wp; /* point to ctxt wp */ - void *pre_aligned; - void *base; - void *rp; - void *wp; - size_t el_size; - size_t len; - size_t elements; - size_t alloc_size; - void __iomem *db_addr; -}; - -struct mhi_cmd { - struct mhi_ring ring; - spinlock_t lock; -}; - -struct mhi_buf_info { - void *v_addr; - void *bb_addr; - void *wp; - void *cb_buf; - dma_addr_t p_addr; - size_t len; - enum dma_data_direction dir; - bool used; /* Indicates whether the buffer is used or not */ - bool pre_mapped; /* Already pre-mapped by client */ -}; - -struct mhi_event { - struct mhi_controller *mhi_cntrl; - struct mhi_chan *mhi_chan; /* dedicated to channel */ - u32 er_index; - u32 intmod; - u32 irq; - int chan; /* this event ring is dedicated to a channel (optional) */ - u32 priority; - enum mhi_er_data_type data_type; - struct mhi_ring ring; - struct db_cfg db_cfg; - struct tasklet_struct task; - spinlock_t lock; - int (*process_event)(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota); - bool hw_ring; - bool cl_manage; - bool offload_ev; /* managed by a device driver */ -}; - -struct mhi_chan { - const char *name; - /* - * Important: When consuming, increment tre_ring first and when - * releasing, decrement buf_ring first. If tre_ring has space, buf_ring - * is guranteed to have space so we do not need to check both rings. - */ - struct mhi_ring buf_ring; - struct mhi_ring tre_ring; - u32 chan; - u32 er_index; - u32 intmod; - enum mhi_ch_type type; - enum dma_data_direction dir; - struct db_cfg db_cfg; - enum mhi_ch_ee_mask ee_mask; - enum mhi_ch_state ch_state; - enum mhi_ev_ccs ccs; - struct mhi_device *mhi_dev; - void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); - struct mutex mutex; - struct completion completion; - rwlock_t lock; - struct list_head node; - bool lpm_notify; - bool configured; - bool offload_ch; - bool pre_alloc; - bool wake_capable; -}; - -/* Default MHI timeout */ -#define MHI_TIMEOUT_MS (1000) - -/* debugfs related functions */ -#ifdef CONFIG_MHI_BUS_DEBUG -void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); -void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); -void mhi_debugfs_init(void); -void mhi_debugfs_exit(void); -#else -static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) -{ -} - -static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) -{ -} - -static inline void mhi_debugfs_init(void) -{ -} - -static inline void mhi_debugfs_exit(void) -{ -} -#endif - -struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); - -int mhi_destroy_device(struct device *dev, void *data); -void mhi_create_devices(struct mhi_controller *mhi_cntrl); - -int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info **image_info, size_t alloc_size); -void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info *image_info); - -/* Power management APIs */ -enum mhi_pm_state __must_check mhi_tryset_pm_state( - struct mhi_controller *mhi_cntrl, - enum mhi_pm_state state); -const char *to_mhi_pm_state_str(enum mhi_pm_state state); -int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, - enum dev_st_transition state); -void mhi_pm_st_worker(struct work_struct *work); -void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); -int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); -int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); -void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); -int 
mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); -int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); -int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - enum mhi_cmd_type cmd); -int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); -static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) -{ - return (mhi_cntrl->dev_state >= MHI_STATE_M0 && - mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); -} - -static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) -{ - pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); - mhi_cntrl->runtime_get(mhi_cntrl); - mhi_cntrl->runtime_put(mhi_cntrl); -} - -/* Register access methods */ -void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, - void __iomem *db_addr, dma_addr_t db_val); -void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_mode, void __iomem *db_addr, - dma_addr_t db_val); -int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 *out); -int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 mask, - u32 shift, u32 *out); -int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 mask, - u32 shift, u32 val, u32 delayus); -void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 val); -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 shift, u32 val); -void mhi_ring_er_db(struct mhi_event *mhi_event); -void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, - dma_addr_t db_val); -void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); -void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); - -/* Initialization methods */ -int mhi_init_mmio(struct mhi_controller *mhi_cntrl); -int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); -void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); -int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); -void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info); -void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); - -/* Automatically allocate and queue inbound buffers */ -#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) -int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags); - -int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); -void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); -void mhi_reset_chan(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); - -/* Event processing methods */ -void mhi_ctrl_ev_task(unsigned long data); -void mhi_ev_task(unsigned long data); -int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, u32 event_quota); -int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, u32 event_quota); - -/* ISR handlers */ -irqreturn_t mhi_irq_handler(int irq_number, void *dev); -irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); -irqreturn_t mhi_intvec_handler(int irq_number, void *dev); - -int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - struct mhi_buf_info *info, enum mhi_flags flags); -int 
mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); - -#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig new file mode 100644 index 000000000000..90ab3b040672 --- /dev/null +++ b/drivers/bus/mhi/ep/Kconfig @@ -0,0 +1,10 @@ +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by a host processor to control + and communicate a modem device over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for the endpoint devices, + such as SDX55 modem connected to the host machine over PCIe. diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000000..aad85f180b70 --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000000..a2125fa5fe2f --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include <linux/bitfield.h> + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 
* (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. + */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring_item { + struct list_head node; + struct mhi_ep_ring *ring; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; + u32 er_index; + u32 irq_vector; + bool started; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +/* MHI Ring related functions */ +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring); + +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct 
mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..40109a79017a --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,1591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/dma-direction.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/mhi_ep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include "internal.h" + +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +static int mhi_ep_destroy_device(struct device *dev, void *data); + +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. + */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event.dword[0] = MHI_TRE_EV_DWORD0(code, len); + event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_SC_EV_DWORD0(state); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_EE_EV_DWORD0(exec_env); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event.dword[0] = MHI_CC_EV_DWORD0(code); + event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring 
*ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. + */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state 
= MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. 
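+ * An EOB completion acknowledges only the current chained TRE; the rest of the packet continues in the following TREs of the same TD.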
+ */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. + */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); + 
goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. + */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void 
mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring element till write offset. In case of an error, just try to + * process next element. 
+ */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process next element. */ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + mutex_lock(&chan->lock); + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, 
&mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers each of 32bit length. + * For checking all interrupts, we need to loop through each registers and then + * check for bits set. + */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. + */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + bool mhi_reset; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { + dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the 
transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR. The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. + */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. The host will reset all contexts and issue MHI RESET so that we + * could also recover from error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) + mhi_ep_abort_transfer(mhi_cntrl); + + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); + /* Set channel state to SUSPENDED */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); + /* Set channel state to RUNNING */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with it is NULL. 
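+ * Leaving a stale pointer here would make the next START_CHAN command from the host skip device creation and later dereference freed memory.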
+ */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +/* + * MHI channels are always defined in pairs with UL as the even numbered + * channel and DL as odd numbered one. This function gets UL channel (primary) + * as the ch_id and always looks after the next entry in channel list for + * the corresponding DL channel (secondary). + */ +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + struct device *dev = mhi_cntrl->cntrl_dev; + struct mhi_ep_device *mhi_dev; + int ret; + + /* Check if the channel name is same for both UL and DL */ + if (strcmp(mhi_chan->name, mhi_chan[1].name)) { + dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", + mhi_chan->name, mhi_chan[1].name); + return -EINVAL; + } + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + /* Configure primary channel */ + mhi_dev->ul_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + mhi_dev->dl_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the 
defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and direction less channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); + spin_lock_init(&mhi_cntrl->state_lock); + spin_lock_init(&mhi_cntrl->list_lock); + mutex_init(&mhi_cntrl->event_lock); + + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_destroy_wq; + + mhi_cntrl->index = ret; + + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_free_irq; + } + + dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); +err_free_cmd: + 
kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + destroy_workqueue(mhi_cntrl->wq); + + free_irq(mhi_cntrl->irq, mhi_cntrl); + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_ep_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if 
(!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, + .uevent = mhi_ep_uevent, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..b5bfd22f2c8e --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/mhi_ep.h> + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = 
mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000000..115518ec76a4 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/mhi_ep.h> +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + __le64 rlen; + + memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); + + return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; +} + +static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + int ret; + + /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
*/ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + + if (end) { + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, + &ring->ring_cache[0], + end * sizeof(struct mhi_ring_element)); + if (ret < 0) + return ret; + } + } + + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + + return 0; +} + +static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + wr_ptr = mhi_ep_mmio_get_db(ring); + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t old_offset = 0; + u32 num_free_elem; + __le64 rp; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (!num_free_elem) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + + /* Update rp in ring context */ + rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); + memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); + + ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), + sizeof(*el)); + if (ret < 0) + return ret; + + return 0; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->db_offset_h = EP_CRDB_HIGHER; + ring->db_offset_l = EP_CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + __le64 val; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + memcpy_fromio(&val, (void __iomem *) 
&ring->ring_ctx->generic.rbase, sizeof(u64)); + ring->rbase = le64_to_cpu(val); + + if (ring->type == RING_TYPE_CH) + ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); + + if (ring->type == RING_TYPE_ER) + ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + + /* During ring init, both rp and wp are equal */ + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); + ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64)); + ret = mhi_ep_cache_ring(ring, le64_to_cpu(val)); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + ring->started = true; + + return 0; +} + +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->started = false; + kfree(ring->ring_cache); + ring->ring_cache = NULL; +} diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..3655c19e23c7 --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/errno.h> +#include <linux/mhi_ep.h> +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + /* If MHI is in M3, resume suspended channels */ + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) { + 
mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + + mhi_ep_suspend_channels(mhi_cntrl); + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EIO; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + mhi_ep_handle_syserr(mhi_cntrl); + + return ret; +} diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig new file mode 100644 index 000000000000..da5cd0c9fc62 --- /dev/null +++ b/drivers/bus/mhi/host/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. + +config MHI_BUS_DEBUG + bool "Debugfs support for the MHI bus" + depends on MHI_BUS && DEBUG_FS + help + Enable debugfs support for use with the MHI transport. Allows + reading and/or modifying some values within the MHI controller + for debug and test purposes. + +config MHI_BUS_PCI_GENERIC + tristate "MHI PCI controller driver" + depends on MHI_BUS + depends on PCI + help + This driver provides MHI PCI controller driver for devices such as + Qualcomm SDX55 based PCIe modems. 
+ diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/host/Makefile index c3feb4130aa3..859c2f38451c 100644 --- a/drivers/bus/mhi/core/Makefile +++ b/drivers/bus/mhi/host/Makefile @@ -1,4 +1,6 @@ obj-$(CONFIG_MHI_BUS) += mhi.o - mhi-y := init.o main.o pm.o boot.o mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o + +obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o +mhi_pci_generic-y += pci_generic.o diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/host/boot.c index 74295d3cc662..26d0eddb1477 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -19,8 +19,8 @@ #include "internal.h" /* Setup RDDM vector table for RDDM transfer and program RXVEC */ -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info) +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) { struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; @@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 sequence_id; unsigned int i; + int ret; for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { bhi_vec->dma_addr = mhi_buf->dma_addr; @@ -45,12 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); - mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, - BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, - sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + if (ret) { + dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n"); + return ret; + } dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", &mhi_buf->dma_addr, mhi_buf->len, sequence_id); + + return 0; } /* Collect RDDM buffer during kernel panic */ @@ -68,7 +74,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state), + mhi_state_str(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); /* @@ -127,9 +133,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) while (retry--) { ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, - BHIE_RXVECSTATUS_STATUS_BMSK, - BHIE_RXVECSTATUS_STATUS_SHFT, - &rx_status); + BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status); if (ret) return -EIO; @@ -168,7 +172,6 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, BHIE_RXVECSTATUS_STATUS_BMSK, - BHIE_RXVECSTATUS_STATUS_SHFT, &rx_status) || rx_status, msecs_to_jiffies(mhi_cntrl->timeout_ms)); @@ -202,18 +205,19 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); - mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, - BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, - sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); read_unlock_bh(pm_lock); + if (ret) + return ret; + /* Wait for the image download to complete */ ret = wait_event_timeout(mhi_cntrl->state_event, MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || mhi_read_reg_field(mhi_cntrl, base, BHIE_TXVECSTATUS_OFFS, BHIE_TXVECSTATUS_STATUS_BMSK, - 
BHIE_TXVECSTATUS_STATUS_SHFT, &tx_status) || tx_status, msecs_to_jiffies(mhi_cntrl->timeout_ms)); if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || @@ -265,8 +269,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, ret = wait_event_timeout(mhi_cntrl->state_event, MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, - BHI_STATUS_MASK, BHI_STATUS_SHIFT, - &tx_status) || tx_status, + BHI_STATUS_MASK, &tx_status) || tx_status, msecs_to_jiffies(mhi_cntrl->timeout_ms)); if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) goto invalid_pm_state; diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/host/debugfs.c index 858d7516410b..cfec7811dfbb 100644 --- a/drivers/bus/mhi/core/debugfs.c +++ b/drivers/bus/mhi/host/debugfs.c @@ -20,7 +20,7 @@ static int mhi_debugfs_states_show(struct seq_file *m, void *d) seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), + mhi_state_str(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_cntrl->wake_set ? "true" : "false"); @@ -60,16 +60,16 @@ static int mhi_debugfs_events_show(struct seq_file *m, void *d) } seq_printf(m, "Index: %d intmod count: %lu time: %lu", - i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >> - EV_CTX_INTMODC_SHIFT, - (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >> - EV_CTX_INTMODT_SHIFT); + i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> + __ffs(EV_CTX_INTMODC_MASK), + (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> + __ffs(EV_CTX_INTMODT_MASK)); - seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase, - er_ctxt->rlen); + seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), + le64_to_cpu(er_ctxt->rlen)); - seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp, - er_ctxt->wp); + seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), + le64_to_cpu(er_ctxt->wp)); seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, &mhi_event->db_cfg.db_val); @@ -106,18 +106,18 @@ static int mhi_debugfs_channels_show(struct seq_file *m, void *d) seq_printf(m, "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", - mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg & - CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, - (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >> - CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg & - CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); + mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_CHSTATE_MASK) >> __ffs(CHAN_CTX_CHSTATE_MASK), + (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> + __ffs(CHAN_CTX_BRSTMODE_MASK), (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_POLLCFG_MASK) >> __ffs(CHAN_CTX_POLLCFG_MASK)); - seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype, - chan_ctxt->erindex); + seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), + le32_to_cpu(chan_ctxt->erindex)); seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", - chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp, - chan_ctxt->wp); + le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), + le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", ring->rp, ring->wp, @@ -206,13 +206,13 @@ static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), - 
TO_MHI_STATE_STR(mhi_cntrl->dev_state), + mhi_state_str(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); state = mhi_get_mhi_state(mhi_cntrl); ee = mhi_get_exec_env(mhi_cntrl); seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), - TO_MHI_STATE_STR(state)); + mhi_state_str(state)); for (i = 0; regs[i].name; i++) { if (!regs[i].base) diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/host/init.c index 046f407dc5d6..c137d55ccfa0 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/host/init.c @@ -4,6 +4,7 @@ * */ +#include <linux/bitfield.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/dma-direction.h> @@ -44,18 +45,6 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { [DEV_ST_TRANSITION_DISABLE] = "DISABLE", }; -const char * const mhi_state_str[MHI_STATE_MAX] = { - [MHI_STATE_RESET] = "RESET", - [MHI_STATE_READY] = "READY", - [MHI_STATE_M0] = "M0", - [MHI_STATE_M1] = "M1", - [MHI_STATE_M2] = "M2", - [MHI_STATE_M3] = "M3", - [MHI_STATE_M3_FAST] = "M3 FAST", - [MHI_STATE_BHI] = "BHI", - [MHI_STATE_SYS_ERR] = "SYS ERROR", -}; - const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { [MHI_CH_STATE_TYPE_RESET] = "RESET", [MHI_CH_STATE_TYPE_STOP] = "STOP", @@ -77,12 +66,14 @@ static const char * const mhi_pm_state_str[] = { [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", }; -const char *to_mhi_pm_state_str(enum mhi_pm_state state) +const char *to_mhi_pm_state_str(u32 state) { - unsigned long pm_state = state; - int index = find_last_bit(&pm_state, 32); + int index; + + if (state) + index = __fls(state); - if (index >= ARRAY_SIZE(mhi_pm_state_str)) + if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) return "Invalid State"; return mhi_pm_state_str[index]; @@ -95,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev, struct mhi_device *mhi_dev = to_mhi_device(dev); struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", + return sysfs_emit(buf, "Serial Number: %u\n", mhi_cntrl->serial_number); } static DEVICE_ATTR_RO(serial_number); @@ -109,17 +100,30 @@ static ssize_t oem_pk_hash_show(struct device *dev, int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) - cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, - "OEMPKHASH[%d]: 0x%x\n", i, - mhi_cntrl->oem_pk_hash[i]); + cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", + i, mhi_cntrl->oem_pk_hash[i]); return cnt; } static DEVICE_ATTR_RO(oem_pk_hash); +static ssize_t soc_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_soc_reset(mhi_cntrl); + return count; +} +static DEVICE_ATTR_WO(soc_reset); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, + &dev_attr_soc_reset.attr, NULL, }; ATTRIBUTE_GROUPS(mhi_dev); @@ -291,17 +295,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_chan->offload_ch) continue; - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); tmp &= ~CHAN_CTX_BRSTMODE_MASK; - tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); + tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode); tmp &= ~CHAN_CTX_POLLCFG_MASK; - 
tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); - chan_ctxt->chcfg = tmp; + tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg); + chan_ctxt->chcfg = cpu_to_le32(tmp); - chan_ctxt->chtype = mhi_chan->type; - chan_ctxt->erindex = mhi_chan->er_index; + chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); + chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); mhi_chan->ch_state = MHI_CH_STATE_DISABLED; mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; @@ -326,17 +330,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_event->offload_ev) continue; - tmp = er_ctxt->intmod; + tmp = le32_to_cpu(er_ctxt->intmod); tmp &= ~EV_CTX_INTMODC_MASK; tmp &= ~EV_CTX_INTMODT_MASK; - tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); - er_ctxt->intmod = tmp; + tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod); + er_ctxt->intmod = cpu_to_le32(tmp); - er_ctxt->ertype = MHI_ER_TYPE_VALID; - er_ctxt->msivec = mhi_event->irq; + er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); + er_ctxt->msivec = cpu_to_le32(mhi_event->irq); mhi_event->db_cfg.db_mode = true; - ring->el_size = sizeof(struct mhi_tre); + ring->el_size = sizeof(struct mhi_ring_element); ring->len = ring->el_size * ring->elements; ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); if (ret) @@ -347,9 +351,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) * ring is empty */ ring->rp = ring->wp = ring->base; - er_ctxt->rbase = ring->iommu_base; + er_ctxt->rbase = cpu_to_le64(ring->iommu_base); er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; - er_ctxt->rlen = ring->len; + er_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &er_ctxt->wp; } @@ -368,7 +372,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { struct mhi_ring *ring = &mhi_cmd->ring; - ring->el_size = sizeof(struct mhi_tre); + ring->el_size = sizeof(struct mhi_ring_element); ring->elements = CMD_EL_PER_RING; ring->len = ring->el_size * ring->elements; ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); @@ -376,9 +380,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) goto error_alloc_cmd; ring->rp = ring->wp = ring->base; - cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; - cmd_ctxt->rlen = ring->len; + cmd_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &cmd_ctxt->wp; } @@ -434,82 +438,71 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; struct { u32 offset; - u32 mask; - u32 shift; u32 val; } reg_info[] = { { - CCABAP_HIGHER, U32_MAX, 0, + CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - CCABAP_LOWER, U32_MAX, 0, + CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - ECABAP_HIGHER, U32_MAX, 0, + ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - ECABAP_LOWER, U32_MAX, 0, + ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - CRCBAP_HIGHER, U32_MAX, 0, + CRCBAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - CRCBAP_LOWER, U32_MAX, 0, + CRCBAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, - mhi_cntrl->total_ev_rings, - }, - { - MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, - mhi_cntrl->hw_ev_rings, - }, - { - MHICTRLBASE_HIGHER, U32_MAX, 0, + MHICTRLBASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLBASE_LOWER, 
U32_MAX, 0, + MHICTRLBASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_HIGHER, U32_MAX, 0, + MHIDATABASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_LOWER, U32_MAX, 0, + MHIDATABASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLLIMIT_HIGHER, U32_MAX, 0, + MHICTRLLIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHICTRLLIMIT_LOWER, U32_MAX, 0, + MHICTRLLIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_HIGHER, U32_MAX, 0, + MHIDATALIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_LOWER, U32_MAX, 0, + MHIDATALIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, - { 0, 0, 0 } + {0, 0} }; dev_dbg(dev, "Initializing MHI registers\n"); /* Read channel db offset */ - ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, - CHDBOFF_CHDBOFF_SHIFT, &val); + ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val); if (ret) { dev_err(dev, "Unable to read CHDBOFF register\n"); return -EIO; @@ -525,8 +518,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) mhi_chan->tre_ring.db_addr = base + val; /* Read event ring db offset */ - ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, - ERDBOFF_ERDBOFF_SHIFT, &val); + ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val); if (ret) { dev_err(dev, "Unable to read ERDBOFF register\n"); return -EIO; @@ -546,9 +538,22 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) /* Write to MMIO registers */ for (i = 0; reg_info[i].offset; i++) - mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, - reg_info[i].mask, reg_info[i].shift, - reg_info[i].val); + mhi_write_reg(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].val); + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK, + mhi_cntrl->total_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK, + mhi_cntrl->hw_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } return 0; } @@ -579,10 +584,10 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, chan_ctxt->rp = 0; chan_ctxt->wp = 0; - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + chan_ctxt->chcfg = cpu_to_le32(tmp); /* Update to all cores */ smp_wmb(); @@ -599,7 +604,7 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; - tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->el_size = sizeof(struct mhi_ring_element); tre_ring->len = tre_ring->el_size * tre_ring->elements; chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); @@ -616,14 +621,14 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, return -ENOMEM; } - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED); + chan_ctxt->chcfg = cpu_to_le32(tmp); - chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; - chan_ctxt->rlen = tre_ring->len; + chan_ctxt->rlen = 
cpu_to_le64(tre_ring->len); tre_ring->ctxt_wp = &chan_ctxt->wp; tre_ring->rp = tre_ring->wp = tre_ring->base; @@ -962,14 +967,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (ret) goto err_destroy_wq; - mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> - SOC_HW_VERSION_FAM_NUM_SHFT; - mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> - SOC_HW_VERSION_DEV_NUM_SHFT; - mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> - SOC_HW_VERSION_MAJOR_VER_SHFT; - mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> - SOC_HW_VERSION_MINOR_VER_SHFT; + mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info); + mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info); + mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info); + mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info); mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); if (mhi_cntrl->index < 0) { @@ -1120,8 +1121,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) */ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, mhi_cntrl->rddm_size); - if (mhi_cntrl->rddm_image) - mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + if (mhi_cntrl->rddm_image) { + ret = mhi_rddm_prepare(mhi_cntrl, + mhi_cntrl->rddm_image); + if (ret) { + mhi_free_bhie_table(mhi_cntrl, + mhi_cntrl->rddm_image); + goto error_reg_offset; + } + } } mutex_unlock(&mhi_cntrl->pm_mutex); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h new file mode 100644 index 000000000000..01fd10a399b6 --- /dev/null +++ b/drivers/bus/mhi/host/internal.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include "../common.h" + +extern struct bus_type mhi_bus_type; + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET 0xb0 +#define MHI_SOC_RESET_REQ BIT(0) + +#define SOC_HW_VERSION_OFFS 0x224 +#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28) +#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16) +#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8) +#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0) + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_ch_state_type { + MHI_CH_STATE_TYPE_RESET, + MHI_CH_STATE_TYPE_STOP, + MHI_CH_STATE_TYPE_START, + MHI_CH_STATE_TYPE_MAX, +}; + +extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; +#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ + "INVALID_STATE" : \ + mhi_ch_state_type_str[(state)]) + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? 
\ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) +#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) +#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ + ee == MHI_EE_FP) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_FP, + DEV_ST_TRANSITION_SYS_ERR, + DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : dev_state_tran_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 +#define MHI_MAX_MTU 0xffff +#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + __le64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void 
__iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + void *v_addr; + void *bb_addr; + void *wp; + void *cb_buf; + dma_addr_t p_addr; + size_t len; + enum dma_data_direction dir; + bool used; /* Indicates whether the buffer is used or not */ + bool pre_mapped; /* Already pre-mapped by client */ +}; + +struct mhi_event { + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel (optional) */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + struct tasklet_struct task; + spinlock_t lock; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ +}; + +struct mhi_chan { + const char *name; + /* + * Important: When consuming, increment tre_ring first and when + * releasing, decrement buf_ring first. If tre_ring has space, buf_ring + * is guranteed to have space so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ch_ee_mask ee_mask; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool wake_capable; +}; + +/* Default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +/* debugfs related functions */ +#ifdef CONFIG_MHI_BUS_DEBUG +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_debugfs_init(void); +void mhi_debugfs_exit(void); +#else +static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_debugfs_init(void) +{ +} + +static inline void mhi_debugfs_exit(void) +{ +} +#endif + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + +/* Power management APIs */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(u32 state); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum 
mhi_cmd_type cmd); +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); +static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) +{ + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); +} + +static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) +{ + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); +} + +/* Register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 *out); +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val, u32 delayus); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Initialization methods */ +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info); +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); + +/* Automatically allocate and queue inbound buffers */ +#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, unsigned int flags); + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Event processing methods */ +void mhi_ctrl_ev_task(unsigned long data); +void mhi_ev_task(unsigned long data); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + +/* ISR handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags); +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller 
*mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/host/main.c index ffde617f93a3..f3aef77a6a4a 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/host/main.c @@ -24,7 +24,7 @@ int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 shift, u32 *out) + u32 mask, u32 *out) { u32 tmp; int ret; @@ -33,21 +33,20 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, if (ret) return ret; - *out = (tmp & mask) >> shift; + *out = (tmp & mask) >> __ffs(mask); return 0; } int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 shift, u32 val, u32 delayus) + u32 mask, u32 val, u32 delayus) { int ret; u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; while (retry--) { - ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, - &out); + ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); if (ret) return ret; @@ -66,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); } -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 shift, u32 val) +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val) { int ret; u32 tmp; ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); if (ret) - return; + return ret; tmp &= ~mask; - tmp |= (val << shift); + tmp |= (val << __ffs(mask)); mhi_write_reg(mhi_cntrl, base, offset, tmp); + + return 0; } void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, @@ -114,7 +116,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring; mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, - ring->db_addr, *ring->ctxt_wp); + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); } void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) @@ -123,7 +125,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring; db = ring->iommu_base + (ring->wp - ring->base); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db); mhi_write_db(mhi_cntrl, ring->db_addr, db); } @@ -140,7 +142,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb(); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db); mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db); @@ -159,8 +161,7 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) { u32 state; int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_MHISTATE_MASK, - MHISTATUS_MHISTATE_SHIFT, &state); + MHISTATUS_MHISTATE_MASK, &state); return ret ? 
MHI_STATE_MAX : state; } EXPORT_SYMBOL_GPL(mhi_get_mhi_state); @@ -432,7 +433,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); void *dev_rp; if (!is_valid_ring_ptr(ev_ring, ptr)) { @@ -479,8 +480,8 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) ee = mhi_get_exec_env(mhi_cntrl); dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); + mhi_state_str(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(ee), mhi_state_str(state)); if (state == MHI_STATE_SYS_ERR) { dev_dbg(dev, "System error detected\n"); @@ -533,18 +534,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev) static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - dma_addr_t ctxt_wp; - /* Update the WP */ ring->wp += ring->el_size; - ctxt_wp = *ring->ctxt_wp + ring->el_size; - if (ring->wp >= (ring->base + ring->len)) { + if (ring->wp >= (ring->base + ring->len)) ring->wp = ring->base; - ctxt_wp = ring->iommu_base; - } - *ring->ctxt_wp = ctxt_wp; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); /* Update the RP */ ring->rp += ring->el_size; @@ -556,7 +552,7 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, } static int parse_xfer_event(struct mhi_controller *mhi_cntrl, - struct mhi_tre *event, + struct mhi_ring_element *event, struct mhi_chan *mhi_chan) { struct mhi_ring *buf_ring, *tre_ring; @@ -592,7 +588,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, case MHI_EV_CC_EOT: { dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); - struct mhi_tre *local_rp, *ev_tre; + struct mhi_ring_element *local_rp, *ev_tre; void *dev_rp; struct mhi_buf_info *buf_info; u16 xfer_len; @@ -691,7 +687,7 @@ end_process_tx_event: } static int parse_rsc_event(struct mhi_controller *mhi_cntrl, - struct mhi_tre *event, + struct mhi_ring_element *event, struct mhi_chan *mhi_chan) { struct mhi_ring *buf_ring, *tre_ring; @@ -755,12 +751,12 @@ end_process_rsc_event: } static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, - struct mhi_tre *tre) + struct mhi_ring_element *tre) { dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; struct mhi_ring *mhi_ring = &cmd_ring->ring; - struct mhi_tre *cmd_pkt; + struct mhi_ring_element *cmd_pkt; struct mhi_chan *mhi_chan; u32 chan; @@ -793,7 +789,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota) { - struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring_element *dev_rp, *local_rp; struct mhi_ring *ev_ring = &mhi_event->ring; struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; @@ -801,7 +797,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); /* * This is a quick check to avoid unnecessary event processing @@ -846,7 +842,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, new_state = MHI_TRE_GET_EV_STATE(local_rp); dev_dbg(dev, "State change event to state: %s\n", - TO_MHI_STATE_STR(new_state)); + 
mhi_state_str(new_state)); switch (new_state) { case MHI_STATE_M0: @@ -873,7 +869,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, } default: dev_err(dev, "Invalid state: %s\n", - TO_MHI_STATE_STR(new_state)); + mhi_state_str(new_state)); } break; @@ -940,7 +936,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -963,14 +959,14 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota) { - struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring_element *dev_rp, *local_rp; struct mhi_ring *ev_ring = &mhi_event->ring; struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; int count = 0; u32 chan; struct mhi_chan *mhi_chan; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO; @@ -1011,7 +1007,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -1187,7 +1183,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, struct mhi_buf_info *info, enum mhi_flags flags) { struct mhi_ring *buf_ring, *tre_ring; - struct mhi_tre *mhi_tre; + struct mhi_ring_element *mhi_tre; struct mhi_buf_info *buf_info; int eot, eob, chain, bei; int ret; @@ -1258,7 +1254,7 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, enum mhi_cmd_type cmd) { - struct mhi_tre *cmd_tre = NULL; + struct mhi_ring_element *cmd_tre = NULL; struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; struct mhi_ring *ring = &mhi_cmd->ring; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -1520,7 +1516,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, int chan) { - struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring_element *dev_rp, *local_rp; struct mhi_ring *ev_ring; struct device *dev = &mhi_cntrl->mhi_dev->dev; unsigned long flags; @@ -1533,7 +1529,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags); - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index b79895810c52..841626727f6b 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -327,6 +327,7 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .config = &modem_quectel_em1xx_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, .sideband_wake = true, }; @@ -370,7 +371,16 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .sideband_wake = false, }; -static const struct mhi_channel_config mhi_mv31_channels[] = { +static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = { + .name = "foxconn-sdx65", + .config = 
&modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_mv3x_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), /* MBIM Control Channel */ @@ -381,25 +391,33 @@ static const struct mhi_channel_config mhi_mv31_channels[] = { MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), }; -static struct mhi_event_config mhi_mv31_events[] = { +static struct mhi_event_config mhi_mv3x_events[] = { MHI_EVENT_CONFIG_CTRL(0, 256), MHI_EVENT_CONFIG_DATA(1, 256), MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), }; -static const struct mhi_controller_config modem_mv31_config = { +static const struct mhi_controller_config modem_mv3x_config = { .max_channels = 128, .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_mv31_channels), - .ch_cfg = mhi_mv31_channels, - .num_events = ARRAY_SIZE(mhi_mv31_events), - .event_cfg = mhi_mv31_events, + .num_channels = ARRAY_SIZE(mhi_mv3x_channels), + .ch_cfg = mhi_mv3x_channels, + .num_events = ARRAY_SIZE(mhi_mv3x_events), + .event_cfg = mhi_mv3x_events, }; static const struct mhi_pci_dev_info mhi_mv31_info = { .name = "cinterion-mv31", - .config = &modem_mv31_config, + .config = &modem_mv3x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, +}; + +static const struct mhi_pci_dev_info mhi_mv32_info = { + .name = "cinterion-mv32", + .config = &modem_mv3x_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, .mru_default = 32768, @@ -445,20 +463,100 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .sideband_wake = false, }; +static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), +}; + +static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), + .ch_cfg = mhi_telit_fn980_hw_v1_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), + .event_cfg = mhi_telit_fn980_hw_v1_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { + .name = "telit-fn980-hwv1", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_telit_fn980_hw_v1_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_telit_fn990_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config 
mhi_telit_fn990_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_telit_fn990_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), + .ch_cfg = mhi_telit_fn990_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .name = "telit-fn990", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + +/* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* Telit FN980 hardware revision v1 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), + .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + /* Telit FN990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, @@ -471,9 +569,21 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* T99W175 (sdx55), Based on Qualcomm new baseline */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W368 (sdx65) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + /* T99W373 (sdx62) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, /* MV31-W (Cinterion) */ { PCI_DEVICE(0x1269, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + /* MV32-WA (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00ba), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* MV32-WB (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00bb), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); @@ -1059,6 +1169,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev) * the intermediate restore kernel reinitializes MHI device with new * context. 
*/ + flush_work(&mhi_pdev->recovery_work); if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { mhi_power_down(mhi_cntrl, true); mhi_unprepare_after_power_down(mhi_cntrl); @@ -1084,6 +1195,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = { .resume = mhi_pci_resume, .freeze = mhi_pci_freeze, .thaw = mhi_pci_restore, + .poweroff = mhi_pci_freeze, .restore = mhi_pci_restore, #endif }; diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/host/pm.c index 4aae0baea008..dc2e8ff3bff2 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -129,14 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) { + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + if (state == MHI_STATE_RESET) { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 1); } else { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_MHISTATE_MASK, - MHICTRL_MHISTATE_SHIFT, state); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, state); } + + if (ret) + dev_err(dev, "Failed to set MHI state to: %s\n", + mhi_state_str(state)); } /* NOP for backward compatibility, host allowed to ring DB in M2 state */ @@ -167,16 +173,14 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) /* Wait for RESET to be cleared and READY bit to be set by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - interval_us); + MHICTRL_RESET_MASK, 0, interval_us); if (ret) { dev_err(dev, "Device failed to clear MHI Reset\n"); return ret; } ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, - interval_us); + MHISTATUS_READY_MASK, 1, interval_us); if (ret) { dev_err(dev, "Device failed to enter MHI Ready\n"); return ret; @@ -218,7 +222,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update all cores */ smp_wmb(); @@ -420,7 +424,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update to all cores */ smp_wmb(); @@ -470,8 +474,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) /* Wait for the reset bit to be cleared by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - 25000); + MHICTRL_RESET_MASK, 0, 25000); if (ret) dev_err(dev, "Device failed to clear MHI Reset\n"); @@ -480,6 +483,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) * hence re-program it */ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + + if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { + /* wait for ready to be set */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, + MHISTATUS, + MHISTATUS_READY_MASK, 1, 25000); + if (ret) + dev_err(dev, "Device failed to enter READY state\n"); + } } 
dev_dbg(dev, @@ -545,7 +557,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + mhi_state_str(mhi_cntrl->dev_state)); mutex_unlock(&mhi_cntrl->pm_mutex); } @@ -602,7 +614,6 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) mhi_cntrl->regs, MHICTRL, MHICTRL_RESET_MASK, - MHICTRL_RESET_SHIFT, &in_reset) || !in_reset, timeout); if (!ret || in_reset) { @@ -689,7 +700,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) exit_sys_error_transition: dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + mhi_state_str(mhi_cntrl->dev_state)); mutex_unlock(&mhi_cntrl->pm_mutex); } @@ -864,7 +875,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { dev_err(dev, "Did not enter M3 state, MHI state: %s, PM state: %s\n", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), + mhi_state_str(mhi_cntrl->dev_state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); return -EIO; } @@ -890,7 +901,7 @@ static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + mhi_state_str(mhi_cntrl->dev_state)); if (mhi_cntrl->pm_state == MHI_PM_DISABLE) return 0; @@ -900,7 +911,7 @@ static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { dev_warn(dev, "Resuming from non M3 state (%s)\n", - TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + mhi_state_str(mhi_get_mhi_state(mhi_cntrl))); if (!force) return -EINVAL; } @@ -937,7 +948,7 @@ static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { dev_err(dev, "Did not enter M0 state, MHI state: %s, PM state: %s\n", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), + mhi_state_str(mhi_cntrl->dev_state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); return -EIO; } @@ -1088,13 +1099,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) state = mhi_get_mhi_state(mhi_cntrl); dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", - TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); + TO_MHI_EXEC_STR(current_ee), mhi_state_str(state)); if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - interval_us); + MHICTRL_RESET_MASK, 0, interval_us); if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); goto error_exit; diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 626dedd110cb..fca0d0669aa9 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void) np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm"); if (np) { err = of_address_to_resource(np, 0, &res); + of_node_put(np); if (!err) return res.start; } diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index fd87a59837fa..5eb0fe73ddc4 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi) return 0; } -static int moxtet_remove(struct spi_device *spi) +static void 
moxtet_remove(struct spi_device *spi) { struct moxtet *moxtet = spi_get_drvdata(spi); @@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi) device_for_each_child(moxtet->dev, NULL, __unregister); mutex_destroy(&moxtet->lock); - - return 0; } static const struct of_device_id moxtet_dt_ids[] = { diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c new file mode 100644 index 000000000000..eedeb29a5ff3 --- /dev/null +++ b/drivers/bus/qcom-ssc-block-bus.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2021, Michael Srba + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> + +/* AXI Halt Register Offsets */ +#define AXI_HALTREQ_REG 0x0 +#define AXI_HALTACK_REG 0x4 +#define AXI_IDLE_REG 0x8 + +#define SSCAON_CONFIG0_CLAMP_EN_OVRD BIT(4) +#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL BIT(5) + +static const char *const qcom_ssc_block_pd_names[] = { + "ssc_cx", + "ssc_mx" +}; + +struct qcom_ssc_block_bus_data { + const char *const *pd_names; + struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)]; + char __iomem *reg_mpm_sscaon_config0; + char __iomem *reg_mpm_sscaon_config1; + struct regmap *halt_map; + struct clk *xo_clk; + struct clk *aggre2_clk; + struct clk *gcc_im_sleep_clk; + struct clk *aggre2_north_clk; + struct clk *ssc_xo_clk; + struct clk *ssc_ahbs_clk; + struct reset_control *ssc_bcr; + struct reset_control *ssc_reset; + u32 ssc_axi_halt; + int num_pds; +}; + +static void reg32_set_bits(char __iomem *reg, u32 value) +{ + u32 tmp = ioread32(reg); + + iowrite32(tmp | value, reg); +} + +static void reg32_clear_bits(char __iomem *reg, u32 value) +{ + u32 tmp = ioread32(reg); + + iowrite32(tmp & (~value), reg); +} + +static int qcom_ssc_block_bus_init(struct device *dev) +{ + int ret; + + struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev); + + ret = clk_prepare_enable(data->xo_clk); + if (ret) { + dev_err(dev, "error enabling xo_clk: %d\n", ret); + goto err_xo_clk; + } + + ret = clk_prepare_enable(data->aggre2_clk); + if (ret) { + dev_err(dev, "error enabling aggre2_clk: %d\n", ret); + goto err_aggre2_clk; + } + + ret = clk_prepare_enable(data->gcc_im_sleep_clk); + if (ret) { + dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret); + goto err_gcc_im_sleep_clk; + } + + /* + * We need to intervene here because the HW logic driving these signals cannot handle + * initialization after power collapse by itself. 
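qcom_ssc_block_bus_init() enables each clock individually with explicit goto unwind labels because MMIO overrides and reset deasserts have to happen between specific enables. Where no such interleaving is required, the same bookkeeping can be collapsed with the clk_bulk API; the following is only a sketch under that assumption (the clock names match what probe requests, the helper itself is hypothetical and not part of this driver).

  #include <linux/clk.h>
  #include <linux/device.h>
  #include <linux/kernel.h>

  static struct clk_bulk_data ssc_bus_bulk_clks[] = {
  	{ .id = "xo" },
  	{ .id = "aggre2" },
  	{ .id = "gcc_im_sleep" },
  };

  static int ssc_bus_enable_bulk_clks(struct device *dev)
  {
  	int ret;

  	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ssc_bus_bulk_clks),
  				ssc_bus_bulk_clks);
  	if (ret)
  		return ret;

  	/* Enables every clock in the array, unwinding internally on failure. */
  	return clk_bulk_prepare_enable(ARRAY_SIZE(ssc_bus_bulk_clks),
  				       ssc_bus_bulk_clks);
  }
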
+ */ + reg32_clear_bits(data->reg_mpm_sscaon_config0, + SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL); + /* override few_ack/rest_ack */ + reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31)); + + ret = clk_prepare_enable(data->aggre2_north_clk); + if (ret) { + dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret); + goto err_aggre2_north_clk; + } + + ret = reset_control_deassert(data->ssc_reset); + if (ret) { + dev_err(dev, "error deasserting ssc_reset: %d\n", ret); + goto err_ssc_reset; + } + + ret = reset_control_deassert(data->ssc_bcr); + if (ret) { + dev_err(dev, "error deasserting ssc_bcr: %d\n", ret); + goto err_ssc_bcr; + } + + regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0); + + ret = clk_prepare_enable(data->ssc_xo_clk); + if (ret) { + dev_err(dev, "error deasserting ssc_xo_clk: %d\n", ret); + goto err_ssc_xo_clk; + } + + ret = clk_prepare_enable(data->ssc_ahbs_clk); + if (ret) { + dev_err(dev, "error deasserting ssc_ahbs_clk: %d\n", ret); + goto err_ssc_ahbs_clk; + } + + return 0; + +err_ssc_ahbs_clk: + clk_disable(data->ssc_xo_clk); + +err_ssc_xo_clk: + regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1); + + reset_control_assert(data->ssc_bcr); + +err_ssc_bcr: + reset_control_assert(data->ssc_reset); + +err_ssc_reset: + clk_disable(data->aggre2_north_clk); + +err_aggre2_north_clk: + reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5)); + reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31)); + + clk_disable(data->gcc_im_sleep_clk); + +err_gcc_im_sleep_clk: + clk_disable(data->aggre2_clk); + +err_aggre2_clk: + clk_disable(data->xo_clk); + +err_xo_clk: + return ret; +} + +static void qcom_ssc_block_bus_deinit(struct device *dev) +{ + int ret; + + struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev); + + clk_disable(data->ssc_xo_clk); + clk_disable(data->ssc_ahbs_clk); + + ret = reset_control_assert(data->ssc_bcr); + if (ret) + dev_err(dev, "error asserting ssc_bcr: %d\n", ret); + + regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1); + + reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31)); + reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5)); + + ret = reset_control_assert(data->ssc_reset); + if (ret) + dev_err(dev, "error asserting ssc_reset: %d\n", ret); + + clk_disable(data->gcc_im_sleep_clk); + + clk_disable(data->aggre2_north_clk); + + clk_disable(data->aggre2_clk); + clk_disable(data->xo_clk); +} + +static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds, + const char *const *pd_names, size_t num_pds) +{ + int ret; + int i; + + for (i = 0; i < num_pds; i++) { + pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); + if (IS_ERR_OR_NULL(pds[i])) { + ret = PTR_ERR(pds[i]) ? 
: -ENODATA; + goto unroll_attach; + } + } + + return num_pds; + +unroll_attach: + for (i--; i >= 0; i--) + dev_pm_domain_detach(pds[i], false); + + return ret; +}; + +static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds) +{ + int i; + + for (i = 0; i < num_pds; i++) + dev_pm_domain_detach(pds[i], false); +} + +static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds) +{ + int ret; + int i; + + for (i = 0; i < num_pds; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); + if (ret < 0) + goto unroll_pd_votes; + } + + return 0; + +unroll_pd_votes: + for (i--; i >= 0; i--) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } + + return ret; +}; + +static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds) +{ + int i; + + for (i = 0; i < num_pds; i++) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } +} + +static int qcom_ssc_block_bus_probe(struct platform_device *pdev) +{ + struct qcom_ssc_block_bus_data *data; + struct device_node *np = pdev->dev.of_node; + struct of_phandle_args halt_args; + struct resource *res; + int ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + platform_set_drvdata(pdev, data); + + data->pd_names = qcom_ssc_block_pd_names; + data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names); + + /* power domains */ + ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n"); + + ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n"); + + /* low level overrides for when the HW logic doesn't "just work" */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0"); + data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->reg_mpm_sscaon_config0)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0), + "Failed to ioremap mpm_sscaon_config0\n"); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1"); + data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->reg_mpm_sscaon_config1)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1), + "Failed to ioremap mpm_sscaon_config1\n"); + + /* resets */ + data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr"); + if (IS_ERR(data->ssc_bcr)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr), + "Failed to acquire reset: scc_bcr\n"); + + data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset"); + if (IS_ERR(data->ssc_reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset), + "Failed to acquire reset: ssc_reset:\n"); + + /* clocks */ + data->xo_clk = devm_clk_get(&pdev->dev, "xo"); + if (IS_ERR(data->xo_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk), + "Failed to get clock: xo\n"); + + data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2"); + if (IS_ERR(data->aggre2_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk), + "Failed to get clock: aggre2\n"); + + data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep"); + if (IS_ERR(data->gcc_im_sleep_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk), + "Failed to get 
clock: gcc_im_sleep\n"); + + data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north"); + if (IS_ERR(data->aggre2_north_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk), + "Failed to get clock: aggre2_north\n"); + + data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo"); + if (IS_ERR(data->ssc_xo_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk), + "Failed to get clock: ssc_xo\n"); + + data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs"); + if (IS_ERR(data->ssc_ahbs_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk), + "Failed to get clock: ssc_ahbs\n"); + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0, + &halt_args); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n"); + + data->halt_map = syscon_node_to_regmap(halt_args.np); + of_node_put(halt_args.np); + if (IS_ERR(data->halt_map)) + return PTR_ERR(data->halt_map); + + data->ssc_axi_halt = halt_args.args[0]; + + qcom_ssc_block_bus_init(&pdev->dev); + + of_platform_populate(np, NULL, NULL, &pdev->dev); + + return 0; +} + +static int qcom_ssc_block_bus_remove(struct platform_device *pdev) +{ + struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev); + + qcom_ssc_block_bus_deinit(&pdev->dev); + + iounmap(data->reg_mpm_sscaon_config0); + iounmap(data->reg_mpm_sscaon_config1); + + qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds); + qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); + pm_runtime_disable(&pdev->dev); + pm_clk_destroy(&pdev->dev); + + return 0; +} + +static const struct of_device_id qcom_ssc_block_bus_of_match[] = { + { .compatible = "qcom,ssc-block-bus", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match); + +static struct platform_driver qcom_ssc_block_bus_driver = { + .probe = qcom_ssc_block_bus_probe, + .remove = qcom_ssc_block_bus_remove, + .driver = { + .name = "qcom-ssc-block-bus", + .of_match_table = qcom_ssc_block_bus_of_match, + }, +}; + +module_platform_driver(qcom_ssc_block_bus_driver); + +MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB"); +MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 4566e730ef2b..60b082fe2ed0 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -227,6 +227,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb, dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); + return rdev; + err_device_add: put_device(&rdev->dev); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 54c0ee6dda30..9a7d12332fad 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3049,7 +3049,7 @@ static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("AM43*", SOC_AM4), SOC_FLAG("DRA7*", SOC_DRA7), - { /* sentinel */ }, + { /* sentinel */ } }; /* @@ -3070,7 +3070,7 @@ static const struct soc_device_attribute sysc_soc_feat_match[] = { SOC_FLAG("OMAP3615/AM3715", DIS_IVA), SOC_FLAG("OMAP3621", DIS_ISP), - { /* sentinel */ }, + { /* sentinel */ } }; static int sysc_add_disabled(unsigned long base) @@ -3232,13 +3232,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata) */ static int sysc_check_active_timer(struct sysc *ddata) { + int error; + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && ddata->cap->type != TI_SYSC_OMAP4_TIMER) 
return 0; + /* + * Quirk for omap3 beagleboard revision A to B4 to use gpt12. + * Revision C and later are fixed with commit 23885389dbbb ("ARM: + * dts: Fix timer regression for beagleboard revision c"). This all + * can be dropped if we stop supporting old beagleboard revisions + * A to B4 at some point. + */ + if (sysc_soc->soc == SOC_3430) + error = -ENXIO; + else + error = -EBUSY; + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) - return -ENXIO; + return error; return 0; } @@ -3381,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev) struct sysc *ddata = platform_get_drvdata(pdev); int error; - cancel_delayed_work_sync(&ddata->idle_work); + /* Device can still be enabled, see deferred idle quirk in probe */ + if (cancel_delayed_work_sync(&ddata->idle_work)) + ti_sysc_idle(&ddata->idle_work.work); error = pm_runtime_resume_and_get(ddata->dev); if (error < 0) {
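The sysc_remove() change relies on cancel_delayed_work_sync() returning true when the work item was still queued, so the deferred idle step can be run synchronously instead of being silently dropped. A small self-contained sketch of that idiom, using hypothetical names rather than the ti-sysc internals:

  #include <linux/workqueue.h>

  struct foo_ddata {
  	struct delayed_work idle_work;
  };

  static void foo_idle_work_fn(struct work_struct *work)
  {
  	/* idle the hardware here */
  }

  static void foo_flush_deferred_idle(struct foo_ddata *ddata)
  {
  	/* true means the handler had not run yet, so run it now */
  	if (cancel_delayed_work_sync(&ddata->idle_work))
  		foo_idle_work_fn(&ddata->idle_work.work);
  }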