Diffstat (limited to 'drivers/char')
 drivers/char/Kconfig                      |   34
 drivers/char/Makefile                     |    3
 drivers/char/hpet.c                       |    6
 drivers/char/hw_random/Kconfig            |   12
 drivers/char/hw_random/amd-rng.c          |    2
 drivers/char/hw_random/core.c             |   38
 drivers/char/hw_random/exynos-trng.c      |    7
 drivers/char/hw_random/ixp4xx-rng.c       |   53
 drivers/char/hw_random/ks-sa-rng.c        |    3
 drivers/char/hw_random/omap-rng.c         |    6
 drivers/char/hw_random/pseries-rng.c      |    2
 drivers/char/ipmi/Kconfig                 |   27
 drivers/char/ipmi/Makefile                |    2
 drivers/char/ipmi/ipmi_msghandler.c       |    1
 drivers/char/ipmi/ipmi_watchdog.c         |   22
 drivers/char/ipmi/kcs_bmc.c               |  505
 drivers/char/ipmi/kcs_bmc.h               |   92
 drivers/char/ipmi/kcs_bmc_aspeed.c        |  633
 drivers/char/ipmi/kcs_bmc_cdev_ipmi.c     |  568
 drivers/char/ipmi/kcs_bmc_client.h        |   45
 drivers/char/ipmi/kcs_bmc_device.h        |   22
 drivers/char/ipmi/kcs_bmc_npcm7xx.c       |   92
 drivers/char/ipmi/kcs_bmc_serio.c         |  157
 drivers/char/mem.c                        |    1
 drivers/char/pcmcia/cm4000_cs.c           |    7
 drivers/char/pcmcia/cm4040_cs.c           |    3
 drivers/char/pcmcia/scr24x_cs.c           |    1
 drivers/char/pcmcia/synclink_cs.c         |   24
 drivers/char/powernv-op-panel.c           |    1
 drivers/char/raw.c                        |  362
 drivers/char/tpm/tpm1-cmd.c               |    4
 drivers/char/tpm/tpm2-cmd.c               |    2
 drivers/char/tpm/tpm_crb.c                |    2
 drivers/char/tpm/tpm_ftpm_tee.c           |    8
 drivers/char/tpm/tpm_tis.c                |    6
 drivers/char/tpm/tpm_tis_core.c           |   25
 drivers/char/tpm/tpm_tis_core.h           |    3
 drivers/char/tpm/tpm_tis_i2c_cr50.c       |    4
 drivers/char/tpm/tpm_tis_spi_main.c       |   14
 drivers/char/ttyprintk.c                  |   52
 drivers/char/virtio_console.c             |    4
 drivers/char/xillybus/Kconfig             |   22
 drivers/char/xillybus/Makefile            |    2
 drivers/char/xillybus/xillybus.h          |   10
 drivers/char/xillybus/xillybus_class.c    |  262
 drivers/char/xillybus/xillybus_class.h    |   30
 drivers/char/xillybus/xillybus_core.c     |  180
 drivers/char/xillybus/xillybus_of.c       |    1
 drivers/char/xillybus/xillybus_pcie.c     |    1
 drivers/char/xillybus/xillyusb.c          | 2259
50 files changed, 4228 insertions, 1394 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b151e0fcdeb5..ea3ead00f30f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -218,19 +218,6 @@ config XILINX_HWICAP
If unsure, say N.
-config R3964
- tristate "Siemens R3964 line discipline"
- depends on TTY && BROKEN
- help
- This driver allows synchronous communication with devices using the
- Siemens R3964 packet protocol. Unless you are dealing with special
- hardware like PLCs, you are unlikely to need this.
-
- To compile this driver as a module, choose M here: the
- module will be called n_r3964.
-
- If unsure, say N.
-
config APPLICOM
tristate "Applicom intelligent fieldbus card support"
depends on PCI
@@ -357,27 +344,6 @@ config NVRAM
To compile this driver as a module, choose M here: the
module will be called nvram.
-config RAW_DRIVER
- tristate "RAW driver (/dev/raw/rawN)"
- depends on BLOCK
- help
- The raw driver permits block devices to be bound to /dev/raw/rawN.
- Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
- See the raw(8) manpage for more details.
-
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
-
-config MAX_RAW_DEVS
- int "Maximum number of RAW devices to support (1-65536)"
- depends on RAW_DRIVER
- range 1 65536
- default "256"
- help
- The maximum number of RAW devices that are supported.
- Default is 256. Increase this number in case you need lots of
- raw devices.
-
config DEVPORT
bool "/dev/port character device"
depends on ISA || PCI
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index ffce287ef415..264eb398fdd4 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
-obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
@@ -44,6 +43,6 @@ obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
-obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ed3b7dab678d..4e5431f01450 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -156,12 +156,12 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
- unsigned long m, t, mc, base, k;
+ unsigned long t, mc, base, k;
struct hpet __iomem *hpet = devp->hd_hpet;
struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
- m = read_counter(&devp->hd_timer->hpet_compare);
+ read_counter(&devp->hd_timer->hpet_compare);
mc = read_counter(&hpet->hpet_mc);
/* The time for the next interrupt would logically be t + m,
* however, if we are very unlucky and the interrupt is delayed
@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
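For context on the new NULL check: hpet_resources() has the signature of an
acpi_walk_resources() callback, and returning anything other than AE_OK aborts
the resource walk. A minimal sketch of the pattern, with hypothetical names
and a hypothetical address range:

#include <linux/acpi.h>
#include <linux/io.h>

static acpi_status example_fixed_mem_cb(struct acpi_resource *res, void *data)
{
	void __iomem *base;

	base = ioremap(0xfed00000, 0x400);	/* hypothetical range */
	if (!base)
		return AE_ERROR;	/* abort the walk rather than storing NULL */

	/* ... consume the mapping ... */
	iounmap(base);
	return AE_OK;
}
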
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1fe006f3f12f..3f166c8a4099 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -152,7 +152,7 @@ config HW_RANDOM_VIA
config HW_RANDOM_IXP4XX
tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Pseudo-Random
@@ -165,17 +165,17 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
default HW_RANDOM
help
- This driver provides kernel-side support for the Random Number
+ This driver provides kernel-side support for the Random Number
Generator hardware found on OMAP16xx, OMAP2/3/4/5, AM33xx/AM43xx
multimedia processors, and Marvell Armada 7k/8k SoCs.
To compile this driver as a module, choose M here: the
module will be called omap-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
@@ -485,13 +485,13 @@ config HW_RANDOM_NPCM
depends on ARCH_NPCM || COMPILE_TEST
default HW_RANDOM
help
- This driver provides support for the Random Number
+ This driver provides support for the Random Number
Generator hardware available in Nuvoton NPCM SoCs.
To compile this driver as a module, choose M here: the
module will be called npcm-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_KEYSTONE
depends on ARCH_KEYSTONE || COMPILE_TEST
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 9959c762da2f..d8d4ef5214a1 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -126,7 +126,7 @@ static struct hwrng amd_rng = {
static int __init mod_init(void)
{
- int err = -ENODEV;
+ int err;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
u32 pmbase;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index adb3c2bd7783..a3db27916256 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -319,11 +319,11 @@ static int enable_best_rng(void)
return ret;
}
-static ssize_t hwrng_attr_current_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t rng_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- int err = -ENODEV;
+ int err;
struct hwrng *rng, *old_rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
@@ -354,9 +354,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
return err ? : len;
}
-static ssize_t hwrng_attr_current_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
ssize_t ret;
struct hwrng *rng;
@@ -371,9 +371,9 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
return ret;
}
-static ssize_t hwrng_attr_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int err;
struct hwrng *rng;
@@ -392,22 +392,16 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
-static ssize_t hwrng_attr_selected_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
-static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
- hwrng_attr_current_show,
- hwrng_attr_current_store);
-static DEVICE_ATTR(rng_available, S_IRUGO,
- hwrng_attr_available_show,
- NULL);
-static DEVICE_ATTR(rng_selected, S_IRUGO,
- hwrng_attr_selected_show,
- NULL);
+static DEVICE_ATTR_RW(rng_current);
+static DEVICE_ATTR_RO(rng_available);
+static DEVICE_ATTR_RO(rng_selected);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
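The handler renames above are not cosmetic: DEVICE_ATTR_RW(name) pastes the
attribute name together with _show/_store, so the functions had to become
rng_current_show()/rng_current_store() before the helper macro could replace
the open-coded DEVICE_ATTR(). A minimal sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* hypothetical value */
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	/* parse and apply 'buf' here */
	return len;
}

/* Expands to roughly DEVICE_ATTR(foo, 0644, foo_show, foo_store) */
static DEVICE_ATTR_RW(foo);
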
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 8e1fe3f8dd2d..9cc3d542dd0f 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->mem);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Could not get runtime PM.\n");
goto err_pm_get;
@@ -165,7 +165,7 @@ err_register:
clk_disable_unprepare(trng->clk);
err_clock:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
err_pm_get:
pm_runtime_disable(&pdev->dev);
@@ -196,10 +196,9 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
{
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Could not get runtime PM.\n");
- pm_runtime_put_noidle(dev);
return ret;
}
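The same conversion recurs in ks-sa-rng.c and omap-rng.c below. The point,
sketched for a generic driver: pm_runtime_resume_and_get() drops the usage
count itself on failure, so the error path no longer needs an explicit
pm_runtime_put_noidle().

#include <linux/pm_runtime.h>

/* Old pattern: the error path must rebalance the usage count manually */
static int old_style(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return ret;
	}
	return 0;
}

/* New pattern: the helper rebalances on failure internally */
static int new_style(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}
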
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index beec1627db3c..188854dd16a9 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/char/hw_random/ixp4xx-rng.c
*
@@ -8,23 +9,20 @@
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Fixes by Michael Buesch
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
+#include <linux/soc/ixp4xx/cpu.h>
#include <asm/io.h>
-#include <mach/hardware.h>
-
static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
{
@@ -40,35 +38,40 @@ static struct hwrng ixp4xx_rng_ops = {
.data_read = ixp4xx_rng_data_read,
};
-static int __init ixp4xx_rng_init(void)
+static int ixp4xx_rng_probe(struct platform_device *pdev)
{
void __iomem * rng_base;
- int err;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
if (!cpu_is_ixp46x()) /* includes IXP455 */
return -ENOSYS;
- rng_base = ioremap(0x70002100, 4);
- if (!rng_base)
- return -ENOMEM;
- ixp4xx_rng_ops.priv = (unsigned long)rng_base;
- err = hwrng_register(&ixp4xx_rng_ops);
- if (err)
- iounmap(rng_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rng_base))
+ return PTR_ERR(rng_base);
- return err;
+ ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+ return devm_hwrng_register(dev, &ixp4xx_rng_ops);
}
-static void __exit ixp4xx_rng_exit(void)
-{
- void __iomem * rng_base = (void __iomem *)ixp4xx_rng_ops.priv;
-
- hwrng_unregister(&ixp4xx_rng_ops);
- iounmap(rng_base);
-}
+static const struct of_device_id ixp4xx_rng_of_match[] = {
+ {
+ .compatible = "intel,ixp46x-rng",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ixp4xx_rng_of_match);
-module_init(ixp4xx_rng_init);
-module_exit(ixp4xx_rng_exit);
+static struct platform_driver ixp4xx_rng_driver = {
+ .driver = {
+ .name = "ixp4xx-hwrandom",
+ .of_match_table = ixp4xx_rng_of_match,
+ },
+ .probe = ixp4xx_rng_probe,
+};
+module_platform_driver(ixp4xx_rng_driver);
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 8f1d47ff9799..2f2f21f1b659 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -241,10 +241,9 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index cede9f159102..00ff96703dd2 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -454,10 +454,9 @@ static int omap_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
}
@@ -543,10 +542,9 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
struct omap_rng_dev *priv = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index f4949b689bd5..62bdd5af1339 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -29,7 +29,7 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
return 8;
}
-/**
+/*
* pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations
*
* This is a required function for a driver to operate in a CMO environment
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 07847d9a459a..249b31197eea 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -124,6 +124,33 @@ config NPCM7XX_KCS_IPMI_BMC
This support is also available as a module. If so, the module
will be called kcs_bmc_npcm7xx.
+config IPMI_KCS_BMC_CDEV_IPMI
+ depends on IPMI_KCS_BMC
+ tristate "IPMI character device interface for BMC KCS devices"
+ help
+ Provides a BMC-side character device implementing IPMI
+ semantics for KCS IPMI devices.
+
+ Say YES if you wish to expose KCS devices on the BMC for IPMI
+ purposes.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_cdev_ipmi.
+
+config IPMI_KCS_BMC_SERIO
+ depends on IPMI_KCS_BMC && SERIO
+ tristate "SerIO adaptor for BMC KCS devices"
+ help
+ Adapts the BMC KCS device for the SerIO subsystem. This allows users
+ to take advantage of userspace interfaces provided by SerIO where
+ appropriate.
+
+ Say YES if you wish to expose KCS devices on the BMC via SerIO
+ interfaces.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_serio.
+
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 0822adc2ec41..84f47d18007f 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o
+obj-$(CONFIG_IPMI_KCS_BMC_SERIO) += kcs_bmc_serio.o
+obj-$(CONFIG_IPMI_KCS_BMC_CDEV_IPMI) += kcs_bmc_cdev_ipmi.o
obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8a0e97b33cae..e96cb5c4f97a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 32c334e34d55..e4ff3b50de7f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -371,16 +371,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
- if ((ipmi_version_major > 1)
- || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
- /* This is an IPMI 1.5-only feature. */
- data[0] |= WDOG_DONT_STOP_ON_SET;
- } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /*
- * In ipmi 1.0, setting the timer stops the watchdog, we
- * need to start it back up again.
- */
- hbnow = 1;
+ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ if ((ipmi_version_major > 1) ||
+ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else {
+ /*
+ * In ipmi 1.0, setting the timer stops the watchdog, we
+ * need to start it back up again.
+ */
+ hbnow = 1;
+ }
}
data[1] = 0;
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index f292e74bd4a5..03d02a848f3a 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -1,458 +1,189 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015-2018, Intel Corporation.
+ * Copyright (c) 2021, IBM Corp.
*/
-#define pr_fmt(fmt) "kcs-bmc: " fmt
-
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/ipmi_bmc.h>
+#include <linux/device.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/mutex.h>
#include "kcs_bmc.h"
-#define DEVICE_NAME "ipmi-kcs"
-
-#define KCS_MSG_BUFSIZ 1000
-
-#define KCS_ZERO_DATA 0
+/* Implement both the device and client interfaces here */
+#include "kcs_bmc_device.h"
+#include "kcs_bmc_client.h"
+/* Record registered devices and drivers */
+static DEFINE_MUTEX(kcs_bmc_lock);
+static LIST_HEAD(kcs_bmc_devices);
+static LIST_HEAD(kcs_bmc_drivers);
-/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
-#define KCS_STATUS_STATE(state) (state << 6)
-#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
-#define KCS_STATUS_CMD_DAT BIT(3)
-#define KCS_STATUS_SMS_ATN BIT(2)
-#define KCS_STATUS_IBF BIT(1)
-#define KCS_STATUS_OBF BIT(0)
+/* Consumer data access */
-/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
-enum kcs_states {
- IDLE_STATE = 0,
- READ_STATE = 1,
- WRITE_STATE = 2,
- ERROR_STATE = 3,
-};
-
-/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
-#define KCS_CMD_GET_STATUS_ABORT 0x60
-#define KCS_CMD_WRITE_START 0x61
-#define KCS_CMD_WRITE_END 0x62
-#define KCS_CMD_READ_BYTE 0x68
-
-static inline u8 read_data(struct kcs_bmc *kcs_bmc)
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc)
{
- return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
}
+EXPORT_SYMBOL(kcs_bmc_read_data);
-static inline void write_data(struct kcs_bmc *kcs_bmc, u8 data)
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data)
{
- kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
}
+EXPORT_SYMBOL(kcs_bmc_write_data);
-static inline u8 read_status(struct kcs_bmc *kcs_bmc)
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc)
{
- return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
}
+EXPORT_SYMBOL(kcs_bmc_read_status);
-static inline void write_status(struct kcs_bmc *kcs_bmc, u8 data)
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data)
{
- kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
}
+EXPORT_SYMBOL(kcs_bmc_write_status);
-static void update_status_bits(struct kcs_bmc *kcs_bmc, u8 mask, u8 val)
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val)
{
- u8 tmp = read_status(kcs_bmc);
-
- tmp &= ~mask;
- tmp |= val & mask;
-
- write_status(kcs_bmc, tmp);
+ kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val);
}
+EXPORT_SYMBOL(kcs_bmc_update_status);
-static inline void set_state(struct kcs_bmc *kcs_bmc, u8 state)
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc)
{
- update_status_bits(kcs_bmc, KCS_STATUS_STATE_MASK,
- KCS_STATUS_STATE(state));
-}
+ struct kcs_bmc_client *client;
+ irqreturn_t rc = IRQ_NONE;
-static void kcs_force_abort(struct kcs_bmc *kcs_bmc)
-{
- set_state(kcs_bmc, ERROR_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, KCS_ZERO_DATA);
+ spin_lock(&kcs_bmc->lock);
+ client = kcs_bmc->client;
+ if (client)
+ rc = client->ops->event(client);
+ spin_unlock(&kcs_bmc->lock);
- kcs_bmc->phase = KCS_PHASE_ERROR;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
-}
-
-static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
-{
- u8 data;
-
- switch (kcs_bmc->phase) {
- case KCS_PHASE_WRITE_START:
- kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
- fallthrough;
-
- case KCS_PHASE_WRITE_DATA:
- if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
- set_state(kcs_bmc, WRITE_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
- read_data(kcs_bmc);
- } else {
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_LENGTH_ERROR;
- }
- break;
-
- case KCS_PHASE_WRITE_END_CMD:
- if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
- set_state(kcs_bmc, READ_STATE);
- kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
- read_data(kcs_bmc);
- kcs_bmc->phase = KCS_PHASE_WRITE_DONE;
- kcs_bmc->data_in_avail = true;
- wake_up_interruptible(&kcs_bmc->queue);
- } else {
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_LENGTH_ERROR;
- }
- break;
-
- case KCS_PHASE_READ:
- if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len)
- set_state(kcs_bmc, IDLE_STATE);
-
- data = read_data(kcs_bmc);
- if (data != KCS_CMD_READ_BYTE) {
- set_state(kcs_bmc, ERROR_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- break;
- }
-
- if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len) {
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->phase = KCS_PHASE_IDLE;
- break;
- }
-
- write_data(kcs_bmc,
- kcs_bmc->data_out[kcs_bmc->data_out_idx++]);
- break;
-
- case KCS_PHASE_ABORT_ERROR1:
- set_state(kcs_bmc, READ_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, kcs_bmc->error);
- kcs_bmc->phase = KCS_PHASE_ABORT_ERROR2;
- break;
-
- case KCS_PHASE_ABORT_ERROR2:
- set_state(kcs_bmc, IDLE_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->phase = KCS_PHASE_IDLE;
- break;
-
- default:
- kcs_force_abort(kcs_bmc);
- break;
- }
-}
-
-static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
-{
- u8 cmd;
-
- set_state(kcs_bmc, WRITE_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
-
- cmd = read_data(kcs_bmc);
- switch (cmd) {
- case KCS_CMD_WRITE_START:
- kcs_bmc->phase = KCS_PHASE_WRITE_START;
- kcs_bmc->error = KCS_NO_ERROR;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- break;
-
- case KCS_CMD_WRITE_END:
- if (kcs_bmc->phase != KCS_PHASE_WRITE_DATA) {
- kcs_force_abort(kcs_bmc);
- break;
- }
-
- kcs_bmc->phase = KCS_PHASE_WRITE_END_CMD;
- break;
-
- case KCS_CMD_GET_STATUS_ABORT:
- if (kcs_bmc->error == KCS_NO_ERROR)
- kcs_bmc->error = KCS_ABORTED_BY_COMMAND;
-
- kcs_bmc->phase = KCS_PHASE_ABORT_ERROR1;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- break;
-
- default:
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_ILLEGAL_CONTROL_CODE;
- break;
- }
-}
-
-int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
-{
- unsigned long flags;
- int ret = -ENODATA;
- u8 status;
-
- spin_lock_irqsave(&kcs_bmc->lock, flags);
-
- status = read_status(kcs_bmc);
- if (status & KCS_STATUS_IBF) {
- if (!kcs_bmc->running)
- kcs_force_abort(kcs_bmc);
- else if (status & KCS_STATUS_CMD_DAT)
- kcs_bmc_handle_cmd(kcs_bmc);
- else
- kcs_bmc_handle_data(kcs_bmc);
-
- ret = 0;
- }
-
- spin_unlock_irqrestore(&kcs_bmc->lock, flags);
-
- return ret;
+ return rc;
}
EXPORT_SYMBOL(kcs_bmc_handle_event);
-static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
-{
- return container_of(filp->private_data, struct kcs_bmc, miscdev);
-}
-
-static int kcs_bmc_open(struct inode *inode, struct file *filp)
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- int ret = 0;
+ int rc;
spin_lock_irq(&kcs_bmc->lock);
- if (!kcs_bmc->running)
- kcs_bmc->running = 1;
- else
- ret = -EBUSY;
- spin_unlock_irq(&kcs_bmc->lock);
-
- return ret;
-}
-
-static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
-{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- __poll_t mask = 0;
-
- poll_wait(filp, &kcs_bmc->queue, wait);
+ if (kcs_bmc->client) {
+ rc = -EBUSY;
+ } else {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF;
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->data_in_avail)
- mask |= EPOLLIN;
+ kcs_bmc->client = client;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, mask);
+ rc = 0;
+ }
spin_unlock_irq(&kcs_bmc->lock);
- return mask;
+ return rc;
}
+EXPORT_SYMBOL(kcs_bmc_enable_device);
-static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- bool data_avail;
- size_t data_len;
- ssize_t ret;
-
- if (!(filp->f_flags & O_NONBLOCK))
- wait_event_interruptible(kcs_bmc->queue,
- kcs_bmc->data_in_avail);
-
- mutex_lock(&kcs_bmc->mutex);
-
spin_lock_irq(&kcs_bmc->lock);
- data_avail = kcs_bmc->data_in_avail;
- if (data_avail) {
- data_len = kcs_bmc->data_in_idx;
- memcpy(kcs_bmc->kbuffer, kcs_bmc->data_in, data_len);
- }
- spin_unlock_irq(&kcs_bmc->lock);
-
- if (!data_avail) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- if (count < data_len) {
- pr_err("channel=%u with too large data : %zu\n",
- kcs_bmc->channel, data_len);
-
- spin_lock_irq(&kcs_bmc->lock);
- kcs_force_abort(kcs_bmc);
- spin_unlock_irq(&kcs_bmc->lock);
+ if (client == kcs_bmc->client) {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE;
- ret = -EOVERFLOW;
- goto out_unlock;
- }
-
- if (copy_to_user(buf, kcs_bmc->kbuffer, data_len)) {
- ret = -EFAULT;
- goto out_unlock;
- }
-
- ret = data_len;
-
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->phase == KCS_PHASE_WRITE_DONE) {
- kcs_bmc->phase = KCS_PHASE_WAIT_READ;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- } else {
- ret = -EAGAIN;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, 0);
+ kcs_bmc->client = NULL;
}
spin_unlock_irq(&kcs_bmc->lock);
-
-out_unlock:
- mutex_unlock(&kcs_bmc->mutex);
-
- return ret;
}
+EXPORT_SYMBOL(kcs_bmc_disable_device);
-static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- ssize_t ret;
-
- /* a minimum response size '3' : netfn + cmd + ccode */
- if (count < 3 || count > KCS_MSG_BUFSIZ)
- return -EINVAL;
-
- mutex_lock(&kcs_bmc->mutex);
-
- if (copy_from_user(kcs_bmc->kbuffer, buf, count)) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ struct kcs_bmc_driver *drv;
+ int error = 0;
+ int rc;
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->phase == KCS_PHASE_WAIT_READ) {
- kcs_bmc->phase = KCS_PHASE_READ;
- kcs_bmc->data_out_idx = 1;
- kcs_bmc->data_out_len = count;
- memcpy(kcs_bmc->data_out, kcs_bmc->kbuffer, count);
- write_data(kcs_bmc, kcs_bmc->data_out[0]);
- ret = count;
- } else {
- ret = -EINVAL;
+ spin_lock_init(&kcs_bmc->lock);
+ kcs_bmc->client = NULL;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&kcs_bmc->entry, &kcs_bmc_devices);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (!rc)
+ continue;
+
+ dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ error = rc;
}
- spin_unlock_irq(&kcs_bmc->lock);
-
-out_unlock:
- mutex_unlock(&kcs_bmc->mutex);
+ mutex_unlock(&kcs_bmc_lock);
- return ret;
+ return error;
}
+EXPORT_SYMBOL(kcs_bmc_add_device);
-static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- long ret = 0;
+ struct kcs_bmc_driver *drv;
+ int rc;
- spin_lock_irq(&kcs_bmc->lock);
-
- switch (cmd) {
- case IPMI_BMC_IOCTL_SET_SMS_ATN:
- update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
- KCS_STATUS_SMS_ATN);
- break;
-
- case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
- update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
- 0);
- break;
-
- case IPMI_BMC_IOCTL_FORCE_ABORT:
- kcs_force_abort(kcs_bmc);
- break;
-
- default:
- ret = -EINVAL;
- break;
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&kcs_bmc->entry);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
}
-
- spin_unlock_irq(&kcs_bmc->lock);
-
- return ret;
+ mutex_unlock(&kcs_bmc_lock);
}
+EXPORT_SYMBOL(kcs_bmc_remove_device);
-static int kcs_bmc_release(struct inode *inode, struct file *filp)
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
-
- spin_lock_irq(&kcs_bmc->lock);
- kcs_bmc->running = 0;
- kcs_force_abort(kcs_bmc);
- spin_unlock_irq(&kcs_bmc->lock);
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
- return 0;
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&drv->entry, &kcs_bmc_drivers);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
}
+EXPORT_SYMBOL(kcs_bmc_register_driver);
-static const struct file_operations kcs_bmc_fops = {
- .owner = THIS_MODULE,
- .open = kcs_bmc_open,
- .read = kcs_bmc_read,
- .write = kcs_bmc_write,
- .release = kcs_bmc_release,
- .poll = kcs_bmc_poll,
- .unlocked_ioctl = kcs_bmc_ioctl,
-};
-
-struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv)
{
- struct kcs_bmc *kcs_bmc;
-
- kcs_bmc = devm_kzalloc(dev, sizeof(*kcs_bmc) + sizeof_priv, GFP_KERNEL);
- if (!kcs_bmc)
- return NULL;
-
- spin_lock_init(&kcs_bmc->lock);
- kcs_bmc->channel = channel;
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
- mutex_init(&kcs_bmc->mutex);
- init_waitqueue_head(&kcs_bmc->queue);
-
- kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
- kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
- kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
-
- kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
- kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
- DEVICE_NAME, channel);
- if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer ||
- !kcs_bmc->miscdev.name)
- return NULL;
- kcs_bmc->miscdev.fops = &kcs_bmc_fops;
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&drv->entry);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_unregister_driver);
- return kcs_bmc;
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events)
+{
+ kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events);
}
-EXPORT_SYMBOL(kcs_bmc_alloc);
+EXPORT_SYMBOL(kcs_bmc_update_event_mask);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc.h b/drivers/char/ipmi/kcs_bmc.h
index eb9ea4ce78b8..fa408b802c79 100644
--- a/drivers/char/ipmi/kcs_bmc.h
+++ b/drivers/char/ipmi/kcs_bmc.h
@@ -6,54 +6,14 @@
#ifndef __KCS_BMC_H__
#define __KCS_BMC_H__
-#include <linux/miscdevice.h>
+#include <linux/list.h>
-/* Different phases of the KCS BMC module.
- * KCS_PHASE_IDLE:
- * BMC should not be expecting nor sending any data.
- * KCS_PHASE_WRITE_START:
- * BMC is receiving a WRITE_START command from system software.
- * KCS_PHASE_WRITE_DATA:
- * BMC is receiving a data byte from system software.
- * KCS_PHASE_WRITE_END_CMD:
- * BMC is waiting a last data byte from system software.
- * KCS_PHASE_WRITE_DONE:
- * BMC has received the whole request from system software.
- * KCS_PHASE_WAIT_READ:
- * BMC is waiting the response from the upper IPMI service.
- * KCS_PHASE_READ:
- * BMC is transferring the response to system software.
- * KCS_PHASE_ABORT_ERROR1:
- * BMC is waiting error status request from system software.
- * KCS_PHASE_ABORT_ERROR2:
- * BMC is waiting for idle status afer error from system software.
- * KCS_PHASE_ERROR:
- * BMC has detected a protocol violation at the interface level.
- */
-enum kcs_phases {
- KCS_PHASE_IDLE,
-
- KCS_PHASE_WRITE_START,
- KCS_PHASE_WRITE_DATA,
- KCS_PHASE_WRITE_END_CMD,
- KCS_PHASE_WRITE_DONE,
+#define KCS_BMC_EVENT_TYPE_OBE BIT(0)
+#define KCS_BMC_EVENT_TYPE_IBF BIT(1)
- KCS_PHASE_WAIT_READ,
- KCS_PHASE_READ,
-
- KCS_PHASE_ABORT_ERROR1,
- KCS_PHASE_ABORT_ERROR2,
- KCS_PHASE_ERROR
-};
-
-/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
-enum kcs_errors {
- KCS_NO_ERROR = 0x00,
- KCS_ABORTED_BY_COMMAND = 0x01,
- KCS_ILLEGAL_CONTROL_CODE = 0x02,
- KCS_LENGTH_ERROR = 0x06,
- KCS_UNSPECIFIED_ERROR = 0xFF
-};
+#define KCS_BMC_STR_OBF BIT(0)
+#define KCS_BMC_STR_IBF BIT(1)
+#define KCS_BMC_STR_CMD_DAT BIT(3)
/* IPMI 2.0 - 9.5, KCS Interface Registers
* @idr: Input Data Register
@@ -66,43 +26,21 @@ struct kcs_ioreg {
u32 str;
};
-struct kcs_bmc {
- spinlock_t lock;
+struct kcs_bmc_device_ops;
+struct kcs_bmc_client;
+struct kcs_bmc_device {
+ struct list_head entry;
+
+ struct device *dev;
u32 channel;
- int running;
- /* Setup by BMC KCS controller driver */
struct kcs_ioreg ioreg;
- u8 (*io_inputb)(struct kcs_bmc *kcs_bmc, u32 reg);
- void (*io_outputb)(struct kcs_bmc *kcs_bmc, u32 reg, u8 b);
-
- enum kcs_phases phase;
- enum kcs_errors error;
-
- wait_queue_head_t queue;
- bool data_in_avail;
- int data_in_idx;
- u8 *data_in;
-
- int data_out_idx;
- int data_out_len;
- u8 *data_out;
- struct mutex mutex;
- u8 *kbuffer;
+ const struct kcs_bmc_device_ops *ops;
- struct miscdevice miscdev;
-
- unsigned long priv[];
+ spinlock_t lock;
+ struct kcs_bmc_client *client;
};
-static inline void *kcs_bmc_priv(struct kcs_bmc *kcs_bmc)
-{
- return kcs_bmc->priv;
-}
-
-int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc);
-struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv,
- u32 channel);
#endif /* __KCS_BMC_H__ */
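On the device side, a compressed sketch of what a hardware driver now
supplies; the Aspeed driver in the next hunk is the full, real example, and
everything here (names, offsets, the MMIO accessors) is illustrative only:

static void __iomem *example_base;	/* hypothetical MMIO mapping */

static u8 example_io_inputb(struct kcs_bmc_device *d, u32 reg)
{
	return readb(example_base + reg);
}

static void example_io_outputb(struct kcs_bmc_device *d, u32 reg, u8 b)
{
	writeb(b, example_base + reg);
}

static void example_io_updateb(struct kcs_bmc_device *d, u32 reg, u8 mask, u8 val)
{
	u8 tmp = readb(example_base + reg);

	writeb((tmp & ~mask) | (val & mask), example_base + reg);
}

static void example_irq_mask_update(struct kcs_bmc_device *d, u8 mask, u8 state)
{
	/* enable/disable hardware IRQ sources for IBF/OBE here */
}

static const struct kcs_bmc_device_ops example_device_ops = {
	.irq_mask_update = example_irq_mask_update,
	.io_inputb       = example_io_inputb,
	.io_outputb      = example_io_outputb,
	.io_updateb      = example_io_updateb,
};

static int example_probe(struct platform_device *pdev)
{
	struct kcs_bmc_device *kcs_bmc;

	kcs_bmc = devm_kzalloc(&pdev->dev, sizeof(*kcs_bmc), GFP_KERNEL);
	if (!kcs_bmc)
		return -ENOMEM;

	kcs_bmc->dev = &pdev->dev;
	kcs_bmc->channel = 1;
	kcs_bmc->ioreg = (struct kcs_ioreg){ .idr = 0x24, .odr = 0x30, .str = 0x3c };
	kcs_bmc->ops = &example_device_ops;

	return kcs_bmc_add_device(kcs_bmc);
}
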
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index eefe362f65f0..92a37b33494c 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -9,10 +9,12 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -20,24 +22,53 @@
#include <linux/slab.h>
#include <linux/timer.h>
-#include "kcs_bmc.h"
+#include "kcs_bmc_device.h"
#define DEVICE_NAME "ast-kcs-bmc"
#define KCS_CHANNEL_MAX 4
+/*
+ * Field class descriptions
+ *
+ * LPCyE Enable LPC channel y
+ * IBFIEy Input Buffer Full IRQ Enable for LPC channel y
+ * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
+ * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y
+ * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1)
+ * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y
+ */
+
+#define LPC_TYIRQX_LOW 0b00
+#define LPC_TYIRQX_HIGH 0b01
+#define LPC_TYIRQX_RSVD 0b10
+#define LPC_TYIRQX_RISING 0b11
+
#define LPC_HICR0 0x000
#define LPC_HICR0_LPC3E BIT(7)
#define LPC_HICR0_LPC2E BIT(6)
#define LPC_HICR0_LPC1E BIT(5)
#define LPC_HICR2 0x008
-#define LPC_HICR2_IBFIF3 BIT(3)
-#define LPC_HICR2_IBFIF2 BIT(2)
-#define LPC_HICR2_IBFIF1 BIT(1)
+#define LPC_HICR2_IBFIE3 BIT(3)
+#define LPC_HICR2_IBFIE2 BIT(2)
+#define LPC_HICR2_IBFIE1 BIT(1)
#define LPC_HICR4 0x010
#define LPC_HICR4_LADR12AS BIT(7)
#define LPC_HICR4_KCSENBL BIT(2)
+#define LPC_SIRQCR0 0x070
+/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
+#define LPC_SIRQCR0_IRQ12E1 BIT(1)
+#define LPC_SIRQCR0_IRQ1E1 BIT(0)
+#define LPC_HICR5 0x080
+#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
+#define LPC_HICR5_ID3IRQX_SHIFT 20
+#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
+#define LPC_HICR5_ID2IRQX_SHIFT 16
+#define LPC_HICR5_SEL3IRQX BIT(15)
+#define LPC_HICR5_IRQXE3 BIT(14)
+#define LPC_HICR5_SEL2IRQX BIT(13)
+#define LPC_HICR5_IRQXE2 BIT(12)
#define LPC_LADR3H 0x014
#define LPC_LADR3L 0x018
#define LPC_LADR12H 0x01C
@@ -52,21 +83,64 @@
#define LPC_STR2 0x040
#define LPC_STR3 0x044
#define LPC_HICRB 0x100
-#define LPC_HICRB_IBFIF4 BIT(1)
+#define LPC_HICRB_EN16LADR2 BIT(5)
+#define LPC_HICRB_EN16LADR1 BIT(4)
+#define LPC_HICRB_IBFIE4 BIT(1)
#define LPC_HICRB_LPC4E BIT(0)
+#define LPC_HICRC 0x104
+#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
+#define LPC_HICRC_ID4IRQX_SHIFT 4
+#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
+#define LPC_HICRC_TY4IRQX_SHIFT 2
+#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
+#define LPC_HICRC_IRQXE4 BIT(0)
#define LPC_LADR4 0x110
#define LPC_IDR4 0x114
#define LPC_ODR4 0x118
#define LPC_STR4 0x11C
+#define LPC_LSADR12 0x120
+#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
+#define LPC_LSADR12_LSADR2_SHIFT 16
+#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
+#define LPC_LSADR12_LSADR1_SHIFT 0
+
+#define OBE_POLL_PERIOD (HZ / 2)
+
+enum aspeed_kcs_irq_mode {
+ aspeed_kcs_irq_none,
+ aspeed_kcs_irq_serirq,
+};
struct aspeed_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
struct regmap *map;
+
+ struct {
+ enum aspeed_kcs_irq_mode mode;
+ int id;
+ } upstream_irq;
+
+ struct {
+ spinlock_t lock;
+ bool remove;
+ struct timer_list timer;
+ } obe;
};
+struct aspeed_kcs_of_ops {
+ int (*get_channel)(struct platform_device *pdev);
+ int (*get_io_address)(struct platform_device *pdev, u32 addrs[2]);
+};
-static u8 aspeed_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
+}
+
+static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
u32 val = 0;
int rc;
@@ -76,15 +150,66 @@ static u8 aspeed_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
return rc == 0 ? (u8) val : 0;
}
-static void aspeed_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
int rc;
rc = regmap_write(priv->map, reg, data);
WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+
+ /* Trigger the upstream IRQ on ODR writes, if enabled */
+
+ switch (reg) {
+ case LPC_ODR1:
+ case LPC_ODR2:
+ case LPC_ODR3:
+ case LPC_ODR4:
+ break;
+ default:
+ return;
+ }
+
+ if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
+ return;
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ switch (priv->upstream_irq.id) {
+ case 12:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
+ LPC_SIRQCR0_IRQ12E1);
+ break;
+ case 1:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
+ LPC_SIRQCR0_IRQ1E1);
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
+ break;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
+ break;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
+ break;
+ default:
+ break;
+ }
}
+static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, val);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
/*
* AST_usrGuide_KCS.pdf
@@ -99,118 +224,237 @@ static void aspeed_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
* C. KCS4
* D / C : CA4h / CA5h
*/
-static void aspeed_kcs_set_address(struct kcs_bmc *kcs_bmc, u16 addr)
+static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
- switch (kcs_bmc->channel) {
+ if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
+ return -EINVAL;
+
+ switch (priv->kcs_bmc.channel) {
case 1:
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_LADR12AS, 0);
- regmap_write(priv->map, LPC_LADR12H, addr >> 8);
- regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
+ addrs[1] << LPC_LSADR12_LSADR1_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
+ LPC_HICRB_EN16LADR1);
+ }
break;
case 2:
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
- regmap_write(priv->map, LPC_LADR12H, addr >> 8);
- regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
+ addrs[1] << LPC_LSADR12_LSADR2_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
+ LPC_HICRB_EN16LADR2);
+ }
break;
case 3:
- regmap_write(priv->map, LPC_LADR3H, addr >> 8);
- regmap_write(priv->map, LPC_LADR3L, addr & 0xFF);
+ if (nr_addrs == 2) {
+ dev_err(priv->kcs_bmc.dev,
+ "Channel 3 only supports inferred status IO address\n");
+ return -EINVAL;
+ }
+
+ regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
break;
case 4:
- regmap_write(priv->map, LPC_LADR4, ((addr + 1) << 16) |
- addr);
+ if (nr_addrs == 1)
+ regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
+ else
+ regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
+
break;
default:
- break;
+ return -EINVAL;
}
+
+ return 0;
}
-static void aspeed_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ switch (dt_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ return LPC_TYIRQX_RISING;
+ case IRQ_TYPE_LEVEL_HIGH:
+ return LPC_TYIRQX_HIGH;
+ case IRQ_TYPE_LEVEL_LOW:
+ return LPC_TYIRQX_LOW;
+ default:
+ return -EINVAL;
+ }
+}
- switch (kcs_bmc->channel) {
+static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
+{
+ unsigned int mask, val, hw_type;
+ int ret;
+
+ if (id > 15)
+ return -EINVAL;
+
+ ret = aspeed_kcs_map_serirq_type(dt_type);
+ if (ret < 0)
+ return ret;
+ hw_type = ret;
+
+ priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
+ priv->upstream_irq.id = id;
+
+ switch (priv->kcs_bmc.channel) {
case 1:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC1E, LPC_HICR0_LPC1E);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC1E, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF1, 0);
- }
+ /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
break;
-
case 2:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC2E, LPC_HICR0_LPC2E);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC2E, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF2, 0);
- }
- break;
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
+ val = (id << LPC_HICR5_ID2IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
- case 3:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC3E, LPC_HICR0_LPC3E);
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC3E, 0);
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_KCSENBL, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF3, 0);
- }
break;
+ case 3:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
+ val = (id << LPC_HICR5_ID3IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+ break;
case 4:
- if (enable)
- regmap_update_bits(priv->map, LPC_HICRB,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E);
- else
- regmap_update_bits(priv->map, LPC_HICRB,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
- 0);
+ mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
+ val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
+ regmap_update_bits(priv->map, LPC_HICRC, mask, val);
break;
+ default:
+ dev_warn(priv->kcs_bmc.dev,
+ "SerIRQ configuration not supported on KCS channel %d\n",
+ priv->kcs_bmc.channel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
+ regmap_update_bits(priv->map, LPC_HICR4,
+ LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
+ return;
default:
- break;
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
}
}
-static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+static void aspeed_kcs_check_obe(struct timer_list *timer)
+{
+ struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
+ unsigned long flags;
+ u8 str;
+
+ spin_lock_irqsave(&priv->obe.lock, flags);
+ if (priv->obe.remove) {
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+
+ str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ if (str & KCS_BMC_STR_OBF) {
+ mod_timer(timer, jiffies + OBE_POLL_PERIOD);
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+
+ kcs_bmc_handle_event(&priv->kcs_bmc);
+}
+
+static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
- struct kcs_bmc *kcs_bmc = arg;
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
- if (!kcs_bmc_handle_event(kcs_bmc))
- return IRQ_HANDLED;
+ /* We don't have an OBE IRQ, emulate it */
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+ if (KCS_BMC_EVENT_TYPE_OBE & state)
+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+ else
+ del_timer(&priv->obe.timer);
+ }
- return IRQ_NONE;
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+ const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
+ enable * LPC_HICR2_IBFIE1);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
+ enable * LPC_HICR2_IBFIE2);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
+ enable * LPC_HICR2_IBFIE3);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
+ enable * LPC_HICRB_IBFIE4);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+ }
}
-static int aspeed_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
+ .irq_mask_update = aspeed_kcs_irq_mask_update,
+ .io_inputb = aspeed_kcs_inb,
+ .io_outputb = aspeed_kcs_outb,
+ .io_updateb = aspeed_kcs_updateb,
+};
+
+static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -231,13 +475,10 @@ static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
-static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
+static int aspeed_kcs_of_v1_get_channel(struct platform_device *pdev)
{
- struct aspeed_kcs_bmc *priv;
struct device_node *np;
- struct kcs_bmc *kcs;
u32 channel;
- u32 slave;
int rc;
np = pdev->dev.of_node;
@@ -245,166 +486,213 @@ static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
rc = of_property_read_u32(np, "kcs_chan", &channel);
if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
- return ERR_PTR(-EINVAL);
- }
-
- kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
- if (!kcs)
- return ERR_PTR(-ENOMEM);
-
- priv = kcs_bmc_priv(kcs);
- priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(priv->map)) {
- dev_err(&pdev->dev, "Couldn't get regmap\n");
- return ERR_PTR(-ENODEV);
- }
-
- rc = of_property_read_u32(np, "kcs_addr", &slave);
- if (rc) {
- dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- kcs->ioreg = ast_kcs_bmc_ioregs[channel - 1];
- aspeed_kcs_set_address(kcs, slave);
-
- return kcs;
+ return channel;
}
-static int aspeed_kcs_calculate_channel(const struct kcs_ioreg *regs)
+static int
+aspeed_kcs_of_v1_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
- int i;
+ int rc;
- for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
- if (!memcmp(&ast_kcs_bmc_ioregs[i], regs, sizeof(*regs)))
- return i + 1;
+ rc = of_property_read_u32(pdev->dev.of_node, "kcs_addr", addrs);
+ if (rc || addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
+ return -EINVAL;
}
- return -EINVAL;
+ return 1;
}
-static struct kcs_bmc *aspeed_kcs_probe_of_v2(struct platform_device *pdev)
+static int aspeed_kcs_of_v2_get_channel(struct platform_device *pdev)
{
- struct aspeed_kcs_bmc *priv;
struct device_node *np;
struct kcs_ioreg ioreg;
- struct kcs_bmc *kcs;
const __be32 *reg;
- int channel;
- u32 slave;
- int rc;
+ int i;
np = pdev->dev.of_node;
/* Don't translate addresses, we want offsets for the regmaps */
reg = of_get_address(np, 0, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.idr = be32_to_cpup(reg);
reg = of_get_address(np, 1, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.odr = be32_to_cpup(reg);
reg = of_get_address(np, 2, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.str = be32_to_cpup(reg);
- channel = aspeed_kcs_calculate_channel(&ioreg);
- if (channel < 0)
- return ERR_PTR(channel);
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
+ return i + 1;
+ }
- kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
- if (!kcs)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
+}
- kcs->ioreg = ioreg;
+static int
+aspeed_kcs_of_v2_get_io_address(struct platform_device *pdev, u32 addrs[2])
+{
+ int rc;
- priv = kcs_bmc_priv(kcs);
- priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(priv->map)) {
- dev_err(&pdev->dev, "Couldn't get regmap\n");
- return ERR_PTR(-ENODEV);
+ rc = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "aspeed,lpc-io-reg",
+ addrs, 1, 2);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
+ return rc;
}
- rc = of_property_read_u32(np, "aspeed,lpc-io-reg", &slave);
- if (rc)
- return ERR_PTR(rc);
+ if (addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
- aspeed_kcs_set_address(kcs, slave);
+ if (rc == 2 && addrs[1] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
- return kcs;
+ return rc;
}
static int aspeed_kcs_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct kcs_bmc *kcs_bmc;
+ const struct aspeed_kcs_of_ops *ops;
+ struct kcs_bmc_device *kcs_bmc;
+ struct aspeed_kcs_bmc *priv;
struct device_node *np;
- int rc;
+ bool have_upstream_irq;
+ u32 upstream_irq[2];
+ int rc, channel;
+ int nr_addrs;
+ u32 addrs[2];
- np = dev->of_node->parent;
+ np = pdev->dev.of_node->parent;
if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
!of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
!of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
- dev_err(dev, "unsupported LPC device binding\n");
+ dev_err(&pdev->dev, "unsupported LPC device binding\n");
return -ENODEV;
}
- np = dev->of_node;
- if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
- kcs_bmc = aspeed_kcs_probe_of_v1(pdev);
- else if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc-v2") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
- kcs_bmc = aspeed_kcs_probe_of_v2(pdev);
- else
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
return -EINVAL;
- if (IS_ERR(kcs_bmc))
- return PTR_ERR(kcs_bmc);
+ channel = ops->get_channel(pdev);
+ if (channel < 0)
+ return channel;
- kcs_bmc->io_inputb = aspeed_kcs_inb;
- kcs_bmc->io_outputb = aspeed_kcs_outb;
+ nr_addrs = ops->get_io_address(pdev, addrs);
+ if (nr_addrs < 0)
+ return nr_addrs;
- rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
+ np = pdev->dev.of_node;
+ rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
+ if (rc && rc != -EINVAL)
+ return -EINVAL;
+
+ have_upstream_irq = !rc;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = channel;
+ kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ kcs_bmc->ops = &aspeed_kcs_ops;
+
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&priv->obe.lock);
+ priv->obe.remove = false;
+ timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
+
+ rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
if (rc)
return rc;
- dev_set_drvdata(dev, kcs_bmc);
+ /* Host to BMC IRQ */
+ rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ /* BMC to Host IRQ */
+ if (have_upstream_irq) {
+ rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
+ if (rc < 0)
+ return rc;
+ } else {
+ priv->upstream_irq.mode = aspeed_kcs_irq_none;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
aspeed_kcs_enable_channel(kcs_bmc, true);
- rc = misc_register(&kcs_bmc->miscdev);
+ rc = kcs_bmc_add_device(&priv->kcs_bmc);
if (rc) {
- dev_err(dev, "Unable to register device\n");
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
return rc;
}
- dev_dbg(&pdev->dev,
- "Probed KCS device %d (IDR=0x%x, ODR=0x%x, STR=0x%x)\n",
- kcs_bmc->channel, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr,
- kcs_bmc->ioreg.str);
+ dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
+ kcs_bmc->channel, addrs[0]);
return 0;
}
static int aspeed_kcs_remove(struct platform_device *pdev)
{
- struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+ struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
- misc_deregister(&kcs_bmc->miscdev);
+ aspeed_kcs_enable_channel(kcs_bmc, false);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+ /* Make sure it's properly dead */
+ spin_lock_irq(&priv->obe.lock);
+ priv->obe.remove = true;
+ spin_unlock_irq(&priv->obe.lock);
+ del_timer_sync(&priv->obe.timer);
return 0;
}
+static const struct aspeed_kcs_of_ops of_v1_ops = {
+ .get_channel = aspeed_kcs_of_v1_get_channel,
+ .get_io_address = aspeed_kcs_of_v1_get_io_address,
+};
+
+static const struct aspeed_kcs_of_ops of_v2_ops = {
+ .get_channel = aspeed_kcs_of_v2_get_channel,
+ .get_io_address = aspeed_kcs_of_v2_get_io_address,
+};
+
static const struct of_device_id ast_kcs_bmc_match[] = {
- { .compatible = "aspeed,ast2400-kcs-bmc" },
- { .compatible = "aspeed,ast2500-kcs-bmc" },
- { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
- { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2400-kcs-bmc", .data = &of_v1_ops },
+ { .compatible = "aspeed,ast2500-kcs-bmc", .data = &of_v1_ops },
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2", .data = &of_v2_ops },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2", .data = &of_v2_ops },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
@@ -421,4 +709,5 @@ module_platform_driver(ast_kcs_bmc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
new file mode 100644
index 000000000000..486834a962c3
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "kcs-bmc: " fmt
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipmi_bmc.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+/* Different phases of the KCS BMC module.
+ * KCS_PHASE_IDLE:
+ * BMC should not be expecting or sending any data.
+ * KCS_PHASE_WRITE_START:
+ * BMC is receiving a WRITE_START command from system software.
+ * KCS_PHASE_WRITE_DATA:
+ * BMC is receiving a data byte from system software.
+ * KCS_PHASE_WRITE_END_CMD:
+ * BMC is waiting for the last data byte from system software.
+ * KCS_PHASE_WRITE_DONE:
+ * BMC has received the whole request from system software.
+ * KCS_PHASE_WAIT_READ:
+ * BMC is waiting for the response from the upper IPMI service.
+ * KCS_PHASE_READ:
+ * BMC is transferring the response to system software.
+ * KCS_PHASE_ABORT_ERROR1:
+ * BMC is waiting for an error-status request from system software.
+ * KCS_PHASE_ABORT_ERROR2:
+ * BMC is waiting for the idle status after an error from system software.
+ * KCS_PHASE_ERROR:
+ * BMC has detected a protocol violation at the interface level.
+ */
+enum kcs_ipmi_phases {
+ KCS_PHASE_IDLE,
+
+ KCS_PHASE_WRITE_START,
+ KCS_PHASE_WRITE_DATA,
+ KCS_PHASE_WRITE_END_CMD,
+ KCS_PHASE_WRITE_DONE,
+
+ KCS_PHASE_WAIT_READ,
+ KCS_PHASE_READ,
+
+ KCS_PHASE_ABORT_ERROR1,
+ KCS_PHASE_ABORT_ERROR2,
+ KCS_PHASE_ERROR
+};
+
+/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
+enum kcs_ipmi_errors {
+ KCS_NO_ERROR = 0x00,
+ KCS_ABORTED_BY_COMMAND = 0x01,
+ KCS_ILLEGAL_CONTROL_CODE = 0x02,
+ KCS_LENGTH_ERROR = 0x06,
+ KCS_UNSPECIFIED_ERROR = 0xFF
+};
+
+struct kcs_bmc_ipmi {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+
+ spinlock_t lock;
+
+ enum kcs_ipmi_phases phase;
+ enum kcs_ipmi_errors error;
+
+ wait_queue_head_t queue;
+ bool data_in_avail;
+ int data_in_idx;
+ u8 *data_in;
+
+ int data_out_idx;
+ int data_out_len;
+ u8 *data_out;
+
+ struct mutex mutex;
+ u8 *kbuffer;
+
+ struct miscdevice miscdev;
+};
+
+#define DEVICE_NAME "ipmi-kcs"
+
+#define KCS_MSG_BUFSIZ 1000
+
+#define KCS_ZERO_DATA 0
+
+/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
+#define KCS_STATUS_STATE(state) ((state) << 6)
+#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
+#define KCS_STATUS_CMD_DAT BIT(3)
+#define KCS_STATUS_SMS_ATN BIT(2)
+#define KCS_STATUS_IBF BIT(1)
+#define KCS_STATUS_OBF BIT(0)
+
+/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
+enum kcs_states {
+ IDLE_STATE = 0,
+ READ_STATE = 1,
+ WRITE_STATE = 2,
+ ERROR_STATE = 3,
+};
+
+/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
+#define KCS_CMD_GET_STATUS_ABORT 0x60
+#define KCS_CMD_WRITE_START 0x61
+#define KCS_CMD_WRITE_END 0x62
+#define KCS_CMD_READ_BYTE 0x68
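These control codes drive the host side of the phase machine documented at the top of the file. Purely to illustrate the sequencing, here is a hedged userspace sketch of one request/response exchange as seen from the host, assuming an x86 machine, the conventional KCS ports 0xCA2/0xCA3, and root privileges; real hosts should use the kernel's ipmi_si driver rather than raw port I/O:

#include <stdio.h>
#include <sys/io.h>

#define KCS_DATA	0xca2	/* data register (IDR/ODR from the BMC's view) */
#define KCS_CMD_STS	0xca3	/* command on write, status on read */
#define STS_IBF		(1 << 1)
#define STS_OBF		(1 << 0)

static void wait_ibf_clear(void)
{
	while (inb(KCS_CMD_STS) & STS_IBF)
		;
}

int main(void)
{
	/* Example request: Get Device ID (netfn 0x06 -> first byte 0x18, cmd 0x01) */
	const unsigned char req[] = { 0x18, 0x01 };
	unsigned int i;

	if (ioperm(KCS_DATA, 2, 1))
		return 1;

	wait_ibf_clear();
	outb(0x61, KCS_CMD_STS);	/* WRITE_START: BMC enters the write phase */
	for (i = 0; i + 1 < sizeof(req); i++) {
		wait_ibf_clear();
		outb(req[i], KCS_DATA);	/* BMC: KCS_PHASE_WRITE_DATA */
	}
	wait_ibf_clear();
	outb(0x62, KCS_CMD_STS);	/* WRITE_END: the next data byte is the last */
	wait_ibf_clear();
	outb(req[i], KCS_DATA);		/* BMC reaches KCS_PHASE_WRITE_DONE */

	for (;;) {
		unsigned char sts;

		wait_ibf_clear();
		sts = inb(KCS_CMD_STS);
		if (((sts >> 6) & 3) != 1) {	/* left READ_STATE: done */
			if (sts & STS_OBF)
				inb(KCS_DATA);	/* consume the final dummy byte */
			break;
		}
		while (!(inb(KCS_CMD_STS) & STS_OBF))
			;
		printf("%02x ", inb(KCS_DATA));
		outb(0x68, KCS_CMD_STS);	/* READ_BYTE: ask for the next byte */
	}
	printf("\n");

	return 0;
}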
+
+static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
+{
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
+}
+
+static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
+{
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_read_data(priv->client.dev);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ priv->phase = KCS_PHASE_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+}
+
+static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
+{
+ struct kcs_bmc_device *dev;
+ u8 data;
+
+ dev = priv->client.dev;
+
+ switch (priv->phase) {
+ case KCS_PHASE_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_DATA;
+ fallthrough;
+
+ case KCS_PHASE_WRITE_DATA:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_WRITE_END_CMD:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, READ_STATE);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ priv->phase = KCS_PHASE_WRITE_DONE;
+ priv->data_in_avail = true;
+ wake_up_interruptible(&priv->queue);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_READ:
+ if (priv->data_out_idx == priv->data_out_len)
+ set_state(priv, IDLE_STATE);
+
+ data = kcs_bmc_read_data(dev);
+ if (data != KCS_CMD_READ_BYTE) {
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ break;
+ }
+
+ if (priv->data_out_idx == priv->data_out_len) {
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+ }
+
+ kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
+ break;
+
+ case KCS_PHASE_ABORT_ERROR1:
+ set_state(priv, READ_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, priv->error);
+ priv->phase = KCS_PHASE_ABORT_ERROR2;
+ break;
+
+ case KCS_PHASE_ABORT_ERROR2:
+ set_state(priv, IDLE_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+}
+
+static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
+{
+ u8 cmd;
+
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ cmd = kcs_bmc_read_data(priv->client.dev);
+ switch (cmd) {
+ case KCS_CMD_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_START;
+ priv->error = KCS_NO_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ case KCS_CMD_WRITE_END:
+ if (priv->phase != KCS_PHASE_WRITE_DATA) {
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+
+ priv->phase = KCS_PHASE_WRITE_END_CMD;
+ break;
+
+ case KCS_CMD_GET_STATUS_ABORT:
+ if (priv->error == KCS_NO_ERROR)
+ priv->error = KCS_ABORTED_BY_COMMAND;
+
+ priv->phase = KCS_PHASE_ABORT_ERROR1;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_ILLEGAL_CONTROL_CODE;
+ break;
+ }
+}
+
+static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_ipmi, client);
+}
+
+static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_ipmi *priv;
+ u8 status;
+ int ret;
+
+ priv = client_to_kcs_bmc_ipmi(client);
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+ if (status & KCS_STATUS_IBF) {
+ if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_ipmi_handle_cmd(priv);
+ else
+ kcs_bmc_ipmi_handle_data(priv);
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
+ .event = kcs_bmc_ipmi_event,
+};
+
+static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
+{
+ return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
+}
+
+static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ __poll_t mask = 0;
+
+ poll_wait(filp, &priv->queue, wait);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->data_in_avail)
+ mask |= EPOLLIN;
+ spin_unlock_irq(&priv->lock);
+
+ return mask;
+}
+
+static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ bool data_avail;
+ size_t data_len;
+ ssize_t ret;
+
+ if (!(filp->f_flags & O_NONBLOCK))
+ wait_event_interruptible(priv->queue,
+ priv->data_in_avail);
+
+ mutex_lock(&priv->mutex);
+
+ spin_lock_irq(&priv->lock);
+ data_avail = priv->data_in_avail;
+ if (data_avail) {
+ data_len = priv->data_in_idx;
+ memcpy(priv->kbuffer, priv->data_in, data_len);
+ }
+ spin_unlock_irq(&priv->lock);
+
+ if (!data_avail) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < data_len) {
+ pr_err("channel=%u with too large data : %zu\n",
+ priv->client.dev->channel, data_len);
+
+ spin_lock_irq(&priv->lock);
+ kcs_bmc_ipmi_force_abort(priv);
+ spin_unlock_irq(&priv->lock);
+
+ ret = -EOVERFLOW;
+ goto out_unlock;
+ }
+
+ if (copy_to_user(buf, priv->kbuffer, data_len)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ ret = data_len;
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WRITE_DONE) {
+ priv->phase = KCS_PHASE_WAIT_READ;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ ssize_t ret;
+
+ /* Require a minimum response size of 3 bytes: netfn + cmd + ccode */
+ if (count < 3 || count > KCS_MSG_BUFSIZ)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (copy_from_user(priv->kbuffer, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WAIT_READ) {
+ priv->phase = KCS_PHASE_READ;
+ priv->data_out_idx = 1;
+ priv->data_out_len = count;
+ memcpy(priv->data_out, priv->kbuffer, count);
+ kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
+ ret = count;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ long ret = 0;
+
+ spin_lock_irq(&priv->lock);
+
+ switch (cmd) {
+ case IPMI_BMC_IOCTL_SET_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
+ break;
+
+ case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
+ break;
+
+ case IPMI_BMC_IOCTL_FORCE_ABORT:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ kcs_bmc_ipmi_force_abort(priv);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+
+ return 0;
+}
+
+static const struct file_operations kcs_bmc_ipmi_fops = {
+ .owner = THIS_MODULE,
+ .open = kcs_bmc_ipmi_open,
+ .read = kcs_bmc_ipmi_read,
+ .write = kcs_bmc_ipmi_write,
+ .release = kcs_bmc_ipmi_release,
+ .poll = kcs_bmc_ipmi_poll,
+ .unlocked_ioctl = kcs_bmc_ipmi_ioctl,
+};
+
+static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
+static LIST_HEAD(kcs_bmc_ipmi_instances);
+
+static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv;
+ int rc;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ mutex_init(&priv->mutex);
+
+ init_waitqueue_head(&priv->queue);
+
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_ipmi_client_ops;
+ priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
+ kcs_bmc->channel);
+ if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
+ return -EINVAL;
+
+ priv->miscdev.fops = &kcs_bmc_ipmi_fops;
+
+ rc = misc_register(&priv->miscdev);
+ if (rc) {
+ dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
+ return rc;
+ }
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_ipmi_instances);
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ misc_deregister(&priv->miscdev);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+ devm_kfree(kcs_bmc->dev, priv->kbuffer);
+ devm_kfree(kcs_bmc->dev, priv->data_out);
+ devm_kfree(kcs_bmc->dev, priv->data_in);
+ devm_kfree(kcs_bmc->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
+ .add_device = kcs_bmc_ipmi_add_device,
+ .remove_device = kcs_bmc_ipmi_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
+ .ops = &kcs_bmc_ipmi_driver_ops,
+};
+
+static int kcs_bmc_ipmi_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_ipmi_init);
+
+static void kcs_bmc_ipmi_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
+}
+module_exit(kcs_bmc_ipmi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc_client.h b/drivers/char/ipmi/kcs_bmc_client.h
new file mode 100644
index 000000000000..6fdcde0a7169
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_client.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_CONSUMER_H__
+#define __KCS_BMC_CONSUMER_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_driver_ops {
+ int (*add_device)(struct kcs_bmc_device *kcs_bmc);
+ int (*remove_device)(struct kcs_bmc_device *kcs_bmc);
+};
+
+struct kcs_bmc_driver {
+ struct list_head entry;
+
+ const struct kcs_bmc_driver_ops *ops;
+};
+
+struct kcs_bmc_client_ops {
+ irqreturn_t (*event)(struct kcs_bmc_client *client);
+};
+
+struct kcs_bmc_client {
+ const struct kcs_bmc_client_ops *ops;
+
+ struct kcs_bmc_device *dev;
+};
+
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv);
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events);
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data);
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data);
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val);
+#endif
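Taken together, these hooks define the client contract: ->event() runs in hard-IRQ context and must report whether it consumed the event, while enable/disable bracket exclusive ownership of the device. A hedged sketch of a minimal client module with hypothetical names (echo_*); KCS_BMC_STR_IBF is assumed to come from kcs_bmc.h as the serio client below uses it, the sketch claims the device at add time for brevity where the real clients defer that to open(), and a production client would also track per-device instances for removal:

// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "kcs_bmc_client.h"

struct echo_client {
	struct kcs_bmc_client client;
};

/* Runs in hard-IRQ context: consume the input byte and echo it back */
static irqreturn_t echo_event(struct kcs_bmc_client *client)
{
	u8 status = kcs_bmc_read_status(client->dev);

	if (!(status & KCS_BMC_STR_IBF))
		return IRQ_NONE;

	kcs_bmc_write_data(client->dev, kcs_bmc_read_data(client->dev));

	return IRQ_HANDLED;
}

static const struct kcs_bmc_client_ops echo_client_ops = {
	.event = echo_event,
};

static int echo_add_device(struct kcs_bmc_device *kcs_bmc)
{
	struct echo_client *priv;

	priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client.dev = kcs_bmc;
	priv->client.ops = &echo_client_ops;

	/* Claim the device; presumably fails if another client already owns it */
	return kcs_bmc_enable_device(kcs_bmc, &priv->client);
}

static int echo_remove_device(struct kcs_bmc_device *kcs_bmc)
{
	/* A real client looks up its instance for kcs_bmc here and disables it */
	return 0;
}

static const struct kcs_bmc_driver_ops echo_driver_ops = {
	.add_device = echo_add_device,
	.remove_device = echo_remove_device,
};

static struct kcs_bmc_driver echo_driver = {
	.ops = &echo_driver_ops,
};

static int __init echo_init(void)
{
	kcs_bmc_register_driver(&echo_driver);
	return 0;
}
module_init(echo_init);

static void __exit echo_exit(void)
{
	kcs_bmc_unregister_driver(&echo_driver);
}
module_exit(echo_exit);

MODULE_LICENSE("GPL v2");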
diff --git a/drivers/char/ipmi/kcs_bmc_device.h b/drivers/char/ipmi/kcs_bmc_device.h
new file mode 100644
index 000000000000..17c572f25c54
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_device.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_DEVICE_H__
+#define __KCS_BMC_DEVICE_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_device_ops {
+ void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable);
+ u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg);
+ void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b);
+ void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
+};
+
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc);
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc);
+
+#endif
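The device-side half mirrors the client header: a host-interface driver embeds struct kcs_bmc_device, supplies these register accessors, and forwards its interrupt to kcs_bmc_handle_event(). A compressed sketch against a hypothetical MMIO register block (the Aspeed and Nuvoton drivers in this series are the real implementations; probe wiring is elided):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>

#include "kcs_bmc_device.h"

struct mmio_kcs {
	struct kcs_bmc_device kcs_bmc;
	void __iomem *base;	/* hypothetical register window */
};

static inline struct mmio_kcs *to_mmio_kcs(struct kcs_bmc_device *kcs_bmc)
{
	return container_of(kcs_bmc, struct mmio_kcs, kcs_bmc);
}

static u8 mmio_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
{
	return readb(to_mmio_kcs(kcs_bmc)->base + reg);
}

static void mmio_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b)
{
	writeb(b, to_mmio_kcs(kcs_bmc)->base + reg);
}

static void mmio_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b)
{
	mmio_kcs_outb(kcs_bmc, reg,
		      (mmio_kcs_inb(kcs_bmc, reg) & ~mask) | (b & mask));
}

static void mmio_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable)
{
	/* Program the controller's IBF/OBE interrupt-enable bits here */
}

static irqreturn_t mmio_kcs_irq(int irq, void *arg)
{
	/* The core dispatches to whichever client currently owns the device */
	return kcs_bmc_handle_event(arg);
}

static const struct kcs_bmc_device_ops mmio_kcs_ops = {
	.irq_mask_update = mmio_kcs_irq_mask_update,
	.io_inputb = mmio_kcs_inb,
	.io_outputb = mmio_kcs_outb,
	.io_updateb = mmio_kcs_updateb,
};

/* probe would fill .dev, .channel, .ioreg and .ops, then call kcs_bmc_add_device() */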
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
index 722f7391fe1f..7961fec56476 100644
--- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -17,7 +17,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include "kcs_bmc.h"
+#include "kcs_bmc_device.h"
#define DEVICE_NAME "npcm-kcs-bmc"
#define KCS_CHANNEL_MAX 3
@@ -38,6 +38,7 @@
#define KCS2CTL 0x2A
#define KCS3CTL 0x3C
#define KCS_CTL_IBFIE BIT(0)
+#define KCS_CTL_OBEIE BIT(1)
#define KCS1IE 0x1C
#define KCS2IE 0x2E
@@ -65,6 +66,8 @@ struct npcm7xx_kcs_reg {
};
struct npcm7xx_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
struct regmap *map;
const struct npcm7xx_kcs_reg *reg;
@@ -76,9 +79,14 @@ static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
{ .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
};
-static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+static inline struct npcm7xx_kcs_bmc *to_npcm7xx_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ return container_of(kcs_bmc, struct npcm7xx_kcs_bmc, kcs_bmc);
+}
+
+static u8 npcm7xx_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
u32 val = 0;
int rc;
@@ -88,37 +96,53 @@ static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
return rc == 0 ? (u8)val : 0;
}
-static void npcm7xx_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+static void npcm7xx_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
int rc;
rc = regmap_write(priv->map, reg, data);
WARN(rc != 0, "regmap_write() failed: %d\n", rc);
}
-static void npcm7xx_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+static void npcm7xx_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 data)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
- regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
- enable ? KCS_CTL_IBFIE : 0);
+ rc = regmap_update_bits(priv->map, reg, mask, data);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
}
-static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
- struct kcs_bmc *kcs_bmc = arg;
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
+ if (mask & KCS_BMC_EVENT_TYPE_OBE)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
+ !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE);
- if (!kcs_bmc_handle_event(kcs_bmc))
- return IRQ_HANDLED;
+ if (mask & KCS_BMC_EVENT_TYPE_IBF)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
+ !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE);
+}
- return IRQ_NONE;
+static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
}
-static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -132,11 +156,18 @@ static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc,
dev_name(dev), kcs_bmc);
}
+static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = {
+ .irq_mask_update = npcm7xx_kcs_irq_mask_update,
+ .io_inputb = npcm7xx_kcs_inb,
+ .io_outputb = npcm7xx_kcs_outb,
+ .io_updateb = npcm7xx_kcs_updateb,
+};
+
static int npcm7xx_kcs_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct npcm7xx_kcs_bmc *priv;
- struct kcs_bmc *kcs_bmc;
+ struct kcs_bmc_device *kcs_bmc;
u32 chan;
int rc;
@@ -146,11 +177,10 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
return -ENODEV;
}
- kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
- if (!kcs_bmc)
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- priv = kcs_bmc_priv(kcs_bmc);
priv->map = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(priv->map)) {
dev_err(dev, "Couldn't get regmap\n");
@@ -158,22 +188,26 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
}
priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1];
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = chan;
kcs_bmc->ioreg.idr = priv->reg->dib;
kcs_bmc->ioreg.odr = priv->reg->dob;
kcs_bmc->ioreg.str = priv->reg->sts;
- kcs_bmc->io_inputb = npcm7xx_kcs_inb;
- kcs_bmc->io_outputb = npcm7xx_kcs_outb;
+ kcs_bmc->ops = &npcm7xx_kcs_ops;
- dev_set_drvdata(dev, kcs_bmc);
+ platform_set_drvdata(pdev, priv);
- npcm7xx_kcs_enable_channel(kcs_bmc, true);
rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
if (rc)
return rc;
- rc = misc_register(&kcs_bmc->miscdev);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ npcm7xx_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(kcs_bmc);
if (rc) {
- dev_err(dev, "Unable to register device\n");
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
return rc;
}
@@ -186,9 +220,13 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
static int npcm7xx_kcs_remove(struct platform_device *pdev)
{
- struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+ struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
- misc_deregister(&kcs_bmc->miscdev);
+ npcm7xx_kcs_enable_channel(kcs_bmc, false);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
return 0;
}
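On the ->irq_mask_update() convention visible here and in the Aspeed driver: the mask argument selects which event types to touch and the state argument gives their new enable values, so events outside the mask are left alone. A small hedged helper to make that concrete, using the same constants:

#include "kcs_bmc_device.h"

/* Enable the IBF event, disable OBE, and leave any other sources untouched */
static void kcs_enable_ibf_only(struct kcs_bmc_device *kcs_bmc)
{
	kcs_bmc->ops->irq_mask_update(kcs_bmc,
				      KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE,
				      KCS_BMC_EVENT_TYPE_IBF);
}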
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
new file mode 100644
index 000000000000..7948cabde50b
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 IBM Corp. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+struct kcs_bmc_serio {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+ struct serio *port;
+
+ spinlock_t lock;
+};
+
+static inline struct kcs_bmc_serio *client_to_kcs_bmc_serio(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_serio, client);
+}
+
+static irqreturn_t kcs_bmc_serio_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_serio *priv;
+ irqreturn_t handled = IRQ_NONE;
+ u8 status;
+
+ priv = client_to_kcs_bmc_serio(client);
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+
+ if (status & KCS_BMC_STR_IBF)
+ handled = serio_interrupt(priv->port, kcs_bmc_read_data(client->dev), 0);
+
+ spin_unlock(&priv->lock);
+
+ return handled;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_serio_client_ops = {
+ .event = kcs_bmc_serio_event,
+};
+
+static int kcs_bmc_serio_open(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static void kcs_bmc_serio_close(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+}
+
+static DEFINE_SPINLOCK(kcs_bmc_serio_instances_lock);
+static LIST_HEAD(kcs_bmc_serio_instances);
+
+static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv;
+ struct serio *port;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->id.type = SERIO_8042;
+ port->open = kcs_bmc_serio_open;
+ port->close = kcs_bmc_serio_close;
+ port->port_data = priv;
+ port->dev.parent = kcs_bmc->dev;
+
+ spin_lock_init(&priv->lock);
+ priv->port = port;
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_serio_client_ops;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_serio_instances);
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ serio_register_port(port);
+
+ dev_info(kcs_bmc->dev, "Initialised serio client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_serio_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_serio_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ /* kfree()s priv->port via put_device() */
+ serio_unregister_port(priv->port);
+
+ /* Ensure the IBF IRQ is disabled if we were the active client */
+ kcs_bmc_disable_device(kcs_bmc, &priv->client);
+
+ devm_kfree(priv->client.dev->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_serio_driver_ops = {
+ .add_device = kcs_bmc_serio_add_device,
+ .remove_device = kcs_bmc_serio_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_serio_driver = {
+ .ops = &kcs_bmc_serio_driver_ops,
+};
+
+static int kcs_bmc_serio_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_serio_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_serio_init);
+
+static void kcs_bmc_serio_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
+}
+module_exit(kcs_bmc_serio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Adapter driver for serio access to BMC KCS devices");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 15dc54fa1d47..1c596b5cdb27 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -16,7 +16,6 @@
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
-#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 89681f07bc78..8f1bce0b4fe5 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
io_read_num_rec_bytes(iobase, &num_bytes_read);
if (num_bytes_read >= 4) {
DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
+ if (num_bytes_read > 4) {
+ rc = -EIO;
+ goto exit_setprotocol;
+ }
break;
}
usleep_range(10000, 11000);
@@ -1050,7 +1054,6 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
- unsigned char tmp;
unsigned char infolen;
unsigned char sendT0;
unsigned short nsend;
@@ -1148,7 +1151,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
set_cardparameter(dev);
/* dummy read, reset flag procedure received */
- tmp = inb(REG_FLAGS1(iobase));
+ inb(REG_FLAGS1(iobase));
dev->flags1 = 0x20 /* T_Active */
| (sendT0)
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index d5e43606339c..827711911da4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -221,7 +221,6 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
unsigned long i;
size_t min_bytes_to_read;
int rc;
- unsigned char uc;
DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid);
@@ -308,7 +307,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
return -EIO;
}
- uc = xinb(iobase + REG_OFFSET_BULK_IN);
+ xinb(iobase + REG_OFFSET_BULK_IN);
DEBUGP(2, dev, "<- cm4040_read (successfully)\n");
return min_bytes_to_read;
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
index 47feb39af34c..1bdce08fae3d 100644
--- a/drivers/char/pcmcia/scr24x_cs.c
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -265,7 +265,6 @@ static int scr24x_probe(struct pcmcia_device *link)
cdev_init(&dev->c_dev, &scr24x_fops);
dev->c_dev.owner = THIS_MODULE;
- dev->c_dev.ops = &scr24x_fops;
ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
if (ret < 0)
goto err;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 3287a7627ed0..6eaefea0520e 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -985,7 +985,7 @@ static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
else
#endif
{
- if (tty && (tty->stopped || tty->hw_stopped)) {
+ if (tty && (tty->flow.stopped || tty->hw_stopped)) {
tx_stop(info);
return;
}
@@ -1005,7 +1005,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
if (!info->tx_active)
return;
} else {
- if (tty && (tty->stopped || tty->hw_stopped)) {
+ if (tty && (tty->flow.stopped || tty->hw_stopped)) {
tx_stop(info);
return;
}
@@ -1419,13 +1419,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
/* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5: info->params.data_bits = 5; break;
- case CS6: info->params.data_bits = 6; break;
- case CS7: info->params.data_bits = 7; break;
- case CS8: info->params.data_bits = 8; break;
- default: info->params.data_bits = 7; break;
- }
+ info->params.data_bits = tty_get_char_size(cflag);
if (cflag & CSTOPB)
info->params.stop_bits = 2;
@@ -1525,7 +1519,7 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
return;
- if (info->tx_count <= 0 || tty->stopped ||
+ if (info->tx_count <= 0 || tty->flow.stopped ||
tty->hw_stopped || !info->tx_buf)
return;
@@ -1594,7 +1588,7 @@ static int mgslpc_write(struct tty_struct * tty,
ret += c;
}
start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ if (info->tx_count && !tty->flow.stopped && !tty->hw_stopped) {
spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
tx_start(info, tty);
@@ -1609,7 +1603,7 @@ cleanup:
/* Return the count of free bytes in transmit buffer
*/
-static int mgslpc_write_room(struct tty_struct *tty)
+static unsigned int mgslpc_write_room(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
int ret;
@@ -1637,10 +1631,10 @@ static int mgslpc_write_room(struct tty_struct *tty)
/* Return the count of bytes in transmit buffer
*/
-static int mgslpc_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mgslpc_chars_in_buffer(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- int rc;
+ unsigned int rc;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
@@ -1655,7 +1649,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
rc = info->tx_count;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
+ printk("%s(%d):mgslpc_chars_in_buffer(%s)=%u\n",
__FILE__, __LINE__, info->device_name, rc);
return rc;
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index 027484ecfb0d..3c99696b145e 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -75,6 +75,7 @@ static int __op_panel_update_display(void)
rc);
break;
}
+ break;
case OPAL_SUCCESS:
break;
default:
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
deleted file mode 100644
index 5d52a1f4738c..000000000000
--- a/drivers/char/raw.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/char/raw.c
- *
- * Front-end raw character devices. These can be bound to any block
- * devices to provide genuine Unix raw character device semantics.
- *
- * We reserve minor number 0 for a control interface. ioctl()s on this
- * device are used to bind the other minor numbers to block devices.
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <linux/blkdev.h>
-#include <linux/backing-dev.h>
-#include <linux/module.h>
-#include <linux/raw.h>
-#include <linux/capability.h>
-#include <linux/uio.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <linux/compat.h>
-#include <linux/vmalloc.h>
-
-#include <linux/uaccess.h>
-
-struct raw_device_data {
- dev_t binding;
- struct block_device *bdev;
- int inuse;
-};
-
-static struct class *raw_class;
-static struct raw_device_data *raw_devices;
-static DEFINE_MUTEX(raw_mutex);
-static const struct file_operations raw_ctl_fops; /* forward declaration */
-
-static int max_raw_minors = CONFIG_MAX_RAW_DEVS;
-
-module_param(max_raw_minors, int, 0);
-MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
-
-/*
- * Open/close code for raw IO.
- *
- * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
- * point at the blockdev's address_space and set the file handle to use
- * O_DIRECT.
- *
- * Set the device's soft blocksize to the minimum possible. This gives the
- * finest possible alignment and has no adverse impact on performance.
- */
-static int raw_open(struct inode *inode, struct file *filp)
-{
- const int minor = iminor(inode);
- struct block_device *bdev;
- int err;
-
- if (minor == 0) { /* It is the control device */
- filp->f_op = &raw_ctl_fops;
- return 0;
- }
-
- pr_warn_ratelimited(
- "process %s (pid %d) is using the deprecated raw device\n"
- "support will be removed in Linux 5.14.\n",
- current->comm, current->pid);
-
- mutex_lock(&raw_mutex);
-
- /*
- * All we need to do on open is check that the device is bound.
- */
- err = -ENODEV;
- if (!raw_devices[minor].binding)
- goto out;
- bdev = blkdev_get_by_dev(raw_devices[minor].binding,
- filp->f_mode | FMODE_EXCL, raw_open);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto out;
- }
- err = set_blocksize(bdev, bdev_logical_block_size(bdev));
- if (err)
- goto out1;
- filp->f_flags |= O_DIRECT;
- filp->f_mapping = bdev->bd_inode->i_mapping;
- if (++raw_devices[minor].inuse == 1)
- file_inode(filp)->i_mapping =
- bdev->bd_inode->i_mapping;
- filp->private_data = bdev;
- raw_devices[minor].bdev = bdev;
- mutex_unlock(&raw_mutex);
- return 0;
-
-out1:
- blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
-out:
- mutex_unlock(&raw_mutex);
- return err;
-}
-
-/*
- * When the final fd which refers to this character-special node is closed, we
- * make its ->mapping point back at its own i_data.
- */
-static int raw_release(struct inode *inode, struct file *filp)
-{
- const int minor= iminor(inode);
- struct block_device *bdev;
-
- mutex_lock(&raw_mutex);
- bdev = raw_devices[minor].bdev;
- if (--raw_devices[minor].inuse == 0)
- /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
- inode->i_mapping = &inode->i_data;
- mutex_unlock(&raw_mutex);
-
- blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
- return 0;
-}
-
-/*
- * Forward ioctls to the underlying block device.
- */
-static long
-raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
-{
- struct block_device *bdev = filp->private_data;
- return blkdev_ioctl(bdev, 0, command, arg);
-}
-
-static int bind_set(int number, u64 major, u64 minor)
-{
- dev_t dev = MKDEV(major, minor);
- dev_t raw = MKDEV(RAW_MAJOR, number);
- struct raw_device_data *rawdev;
- int err = 0;
-
- if (number <= 0 || number >= max_raw_minors)
- return -EINVAL;
-
- if (MAJOR(dev) != major || MINOR(dev) != minor)
- return -EINVAL;
-
- rawdev = &raw_devices[number];
-
- /*
- * This is like making block devices, so demand the
- * same capability
- */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- /*
- * For now, we don't need to check that the underlying
- * block device is present or not: we can do that when
- * the raw device is opened. Just check that the
- * major/minor numbers make sense.
- */
-
- if (MAJOR(dev) == 0 && dev != 0)
- return -EINVAL;
-
- mutex_lock(&raw_mutex);
- if (rawdev->inuse) {
- mutex_unlock(&raw_mutex);
- return -EBUSY;
- }
- if (rawdev->binding)
- module_put(THIS_MODULE);
-
- rawdev->binding = dev;
- if (!dev) {
- /* unbind */
- device_destroy(raw_class, raw);
- } else {
- __module_get(THIS_MODULE);
- device_destroy(raw_class, raw);
- device_create(raw_class, NULL, raw, NULL, "raw%d", number);
- }
- mutex_unlock(&raw_mutex);
- return err;
-}
-
-static int bind_get(int number, dev_t *dev)
-{
- if (number <= 0 || number >= max_raw_minors)
- return -EINVAL;
- *dev = raw_devices[number].binding;
- return 0;
-}
-
-/*
- * Deal with ioctls against the raw-device control interface, to bind
- * and unbind other raw devices.
- */
-static long raw_ctl_ioctl(struct file *filp, unsigned int command,
- unsigned long arg)
-{
- struct raw_config_request rq;
- dev_t dev;
- int err;
-
- switch (command) {
- case RAW_SETBIND:
- if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
- return -EFAULT;
-
- return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
-
- case RAW_GETBIND:
- if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
- return -EFAULT;
-
- err = bind_get(rq.raw_minor, &dev);
- if (err)
- return err;
-
- rq.block_major = MAJOR(dev);
- rq.block_minor = MINOR(dev);
-
- if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
- return -EFAULT;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_COMPAT
-struct raw32_config_request {
- compat_int_t raw_minor;
- compat_u64 block_major;
- compat_u64 block_minor;
-};
-
-static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct raw32_config_request __user *user_req = compat_ptr(arg);
- struct raw32_config_request rq;
- dev_t dev;
- int err = 0;
-
- switch (cmd) {
- case RAW_SETBIND:
- if (copy_from_user(&rq, user_req, sizeof(rq)))
- return -EFAULT;
-
- return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
-
- case RAW_GETBIND:
- if (copy_from_user(&rq, user_req, sizeof(rq)))
- return -EFAULT;
-
- err = bind_get(rq.raw_minor, &dev);
- if (err)
- return err;
-
- rq.block_major = MAJOR(dev);
- rq.block_minor = MINOR(dev);
-
- if (copy_to_user(user_req, &rq, sizeof(rq)))
- return -EFAULT;
-
- return 0;
- }
-
- return -EINVAL;
-}
-#endif
-
-static const struct file_operations raw_fops = {
- .read_iter = blkdev_read_iter,
- .write_iter = blkdev_write_iter,
- .fsync = blkdev_fsync,
- .open = raw_open,
- .release = raw_release,
- .unlocked_ioctl = raw_ioctl,
- .llseek = default_llseek,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations raw_ctl_fops = {
- .unlocked_ioctl = raw_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = raw_ctl_compat_ioctl,
-#endif
- .open = raw_open,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static struct cdev raw_cdev;
-
-static char *raw_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
-}
-
-static int __init raw_init(void)
-{
- dev_t dev = MKDEV(RAW_MAJOR, 0);
- int ret;
-
- if (max_raw_minors < 1 || max_raw_minors > 65536) {
- pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n",
- CONFIG_MAX_RAW_DEVS);
- max_raw_minors = CONFIG_MAX_RAW_DEVS;
- }
-
- raw_devices = vzalloc(array_size(max_raw_minors,
- sizeof(struct raw_device_data)));
- if (!raw_devices) {
- printk(KERN_ERR "Not enough memory for raw device structures\n");
- ret = -ENOMEM;
- goto error;
- }
-
- ret = register_chrdev_region(dev, max_raw_minors, "raw");
- if (ret)
- goto error;
-
- cdev_init(&raw_cdev, &raw_fops);
- ret = cdev_add(&raw_cdev, dev, max_raw_minors);
- if (ret)
- goto error_region;
- raw_class = class_create(THIS_MODULE, "raw");
- if (IS_ERR(raw_class)) {
- printk(KERN_ERR "Error creating raw class.\n");
- cdev_del(&raw_cdev);
- ret = PTR_ERR(raw_class);
- goto error_region;
- }
- raw_class->devnode = raw_devnode;
- device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
-
- return 0;
-
-error_region:
- unregister_chrdev_region(dev, max_raw_minors);
-error:
- vfree(raw_devices);
- return ret;
-}
-
-static void __exit raw_exit(void)
-{
- device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
- class_destroy(raw_class);
- cdev_del(&raw_cdev);
- unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
-}
-
-module_init(raw_init);
-module_exit(raw_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index ca7158fa6e6c..f7dc986fa4a0 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -312,7 +312,7 @@ unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
#define TPM_ST_CLEAR 1
/**
- * tpm_startup() - turn on the TPM
+ * tpm1_startup() - turn on the TPM
* @chip: TPM chip to use
*
* Normally the firmware should start the TPM. This function is provided as a
@@ -611,7 +611,7 @@ out:
#define TPM_ORD_CONTINUE_SELFTEST 83
/**
- * tpm_continue_selftest() - run TPM's selftest
+ * tpm1_continue_selftest() - run TPM's selftest
* @chip: TPM chip to use
*
* Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c84d23951219..a25815a6f625 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -87,7 +87,7 @@ static u8 tpm2_ordinal_duration_index(u32 ordinal)
return TPM_MEDIUM;
case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG;
+ return TPM_LONG_LONG;
case TPM2_CC_PCR_EXTEND: /* 182 */
return TPM_MEDIUM;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a9dcf31eadd2..18606651d1aa 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -464,7 +464,7 @@ static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
/* Detect a 64 bit address on a 32 bit system */
if (start != new_res.start)
- return (void __iomem *) ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
if (!iores)
return devm_ioremap_resource(dev, &new_res);
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2ccdf8ac6994..6e3235565a4d 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev)
pvt_data->session = sess_arg.session;
/* Allocate dynamic shared memory with fTPM TA */
- pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
- MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_COMMAND_SIZE +
+ MAX_RESPONSE_SIZE);
if (IS_ERR(pvt_data->shm)) {
- dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+ dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
rc = -ENOMEM;
goto out_shm_alloc;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 4ed6e660273a..d3f2e5364c27 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -363,11 +363,7 @@ static int tpm_tis_force_device(void)
{
struct platform_device *pdev;
static const struct resource x86_resources[] = {
- {
- .start = 0xFED40000,
- .end = 0xFED40000 + TIS_MEM_LEN - 1,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0xFED40000, TIS_MEM_LEN)
};
if (!force)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 55b9d3965ae1..69579efb247b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -196,13 +196,24 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
return 0;
if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
- /*
- * If this trips, the chances are the read is
- * returning 0xff because the locality hasn't been
- * acquired. Usually because tpm_try_get_ops() hasn't
- * been called before doing a TPM operation.
- */
- WARN_ONCE(1, "TPM returned invalid status\n");
+ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
+ /*
+ * If this trips, the chances are the read is
+ * returning 0xff because the locality hasn't been
+ * acquired. Usually because tpm_try_get_ops() hasn't
+ * been called before doing a TPM operation.
+ */
+ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
+ status);
+
+ /*
+ * Dump stack for forensics, as invalid TPM_STS.x could be
+ * potentially triggered by impaired tpm_try_get_ops() or
+ * tpm_find_get_ops().
+ */
+ dump_stack();
+ }
+
return 0;
}
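The hunk above replaces an unconditional WARN_ONCE() with an open-coded once-only report so the offending status byte can be logged alongside the stack dump. The same idiom in isolation, with a hypothetical flag word:

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

static unsigned long once_flags;

static void report_bad_status(u8 status)
{
	/* test_and_set_bit() is atomic, so only the first caller logs */
	if (!test_and_set_bit(0, &once_flags)) {
		pr_err("invalid status 0x%02x\n", status);
		dump_stack();
	}
}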
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 9b2d32a59f67..b2a3c6c72882 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -83,6 +83,7 @@ enum tis_defaults {
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
+ TPM_TIS_INVALID_STATUS = BIT(1),
};
struct tpm_tis_data {
@@ -90,7 +91,7 @@ struct tpm_tis_data {
int locality;
int irq;
bool irq_tested;
- unsigned int flags;
+ unsigned long flags;
void __iomem *ilb_base_addr;
u16 clkrun_enabled;
wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index f19c227d20f4..44dde2fbe2fb 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -706,14 +706,14 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client,
if (client->irq > 0) {
rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
dev->driver->name, chip);
if (rc < 0) {
dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
return rc;
}
- disable_irq(client->irq);
priv->irq = client->irq;
} else {
dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3856f6ebcb34..54584b4b00d1 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -240,10 +240,14 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi)
tpm_tis_spi_probe_func probe_func;
probe_func = of_device_get_match_data(&spi->dev);
- if (!probe_func && spi_dev_id)
- probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
- if (!probe_func)
- return -ENODEV;
+ if (!probe_func) {
+ if (spi_dev_id) {
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+ } else
+ probe_func = tpm_tis_spi_probe;
+ }
return probe_func(spi);
}
@@ -260,6 +264,8 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
+ { "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
{ "cr50", (unsigned long)cr50_spi_probe },
{}
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 93f5d11c830b..230b2c9b3e3c 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -52,12 +52,7 @@ static void tpk_flush(void)
static int tpk_printk(const unsigned char *buf, int count)
{
- int i = tpk_curr;
-
- if (buf == NULL) {
- tpk_flush();
- return i;
- }
+ int i;
for (i = 0; i < count; i++) {
if (tpk_curr >= TPK_STR_SIZE) {
@@ -100,12 +95,6 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&tpkp->spinlock, flags);
- /* flush tpk_printk buffer */
- tpk_printk(NULL, 0);
- spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@@ -120,7 +109,6 @@ static int tpk_write(struct tty_struct *tty,
unsigned long flags;
int ret;
-
/* exclusive use of tpk_printk within this tty */
spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
@@ -132,40 +120,33 @@ static int tpk_write(struct tty_struct *tty,
/*
* TTY operations write_room function.
*/
-static int tpk_write_room(struct tty_struct *tty)
+static unsigned int tpk_write_room(struct tty_struct *tty)
{
return TPK_MAX_ROOM;
}
/*
- * TTY operations ioctl function.
+ * TTY operations hangup function.
*/
-static int tpk_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static void tpk_hangup(struct tty_struct *tty)
{
struct ttyprintk_port *tpkp = tty->driver_data;
- if (!tpkp)
- return -EINVAL;
-
- switch (cmd) {
- /* Stop TIOCCONS */
- case TIOCCONS:
- return -EOPNOTSUPP;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
+ tty_port_hangup(&tpkp->port);
}
/*
- * TTY operations hangup function.
+ * TTY port operations shutdown function.
*/
-static void tpk_hangup(struct tty_struct *tty)
+static void tpk_port_shutdown(struct tty_port *tport)
{
- struct ttyprintk_port *tpkp = tty->driver_data;
+ struct ttyprintk_port *tpkp =
+ container_of(tport, struct ttyprintk_port, port);
+ unsigned long flags;
- tty_port_hangup(&tpkp->port);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
+ tpk_flush();
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
}
static const struct tty_operations ttyprintk_ops = {
@@ -173,11 +154,12 @@ static const struct tty_operations ttyprintk_ops = {
.close = tpk_close,
.write = tpk_write,
.write_room = tpk_write_room,
- .ioctl = tpk_ioctl,
.hangup = tpk_hangup,
};
-static const struct tty_port_operations null_ops = { };
+static const struct tty_port_operations tpk_port_ops = {
+ .shutdown = tpk_port_shutdown,
+};
static struct tty_driver *ttyprintk_driver;
@@ -195,7 +177,7 @@ static int __init ttyprintk_init(void)
return PTR_ERR(ttyprintk_driver);
tty_port_init(&tpk_port.port);
- tpk_port.port.ops = &null_ops;
+ tpk_port.port.ops = &tpk_port_ops;
ttyprintk_driver->driver_name = "ttyprintk";
ttyprintk_driver->name = "ttyprintk";
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 59dfd9c421a1..7eaf303a7a86 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -475,7 +475,7 @@ static struct port_buffer *get_inbuf(struct port *port)
buf = virtqueue_get_buf(port->in_vq, &len);
if (buf) {
- buf->len = len;
+ buf->len = min_t(size_t, len, buf->size);
buf->offset = 0;
port->stats.bytes_received += len;
}
@@ -1709,7 +1709,7 @@ static void control_work_handler(struct work_struct *work)
while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->c_ivq_lock);
- buf->len = len;
+ buf->len = min_t(size_t, len, buf->size);
buf->offset = 0;
handle_control_message(vq->vdev, portdev, buf);
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index 130dbdce858f..a8036dad437e 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -3,10 +3,14 @@
# Xillybus devices
#
+config XILLYBUS_CLASS
+ tristate
+
config XILLYBUS
tristate "Xillybus generic FPGA interface"
depends on PCI || OF
select CRC32
+ select XILLYBUS_CLASS
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
@@ -21,7 +25,7 @@ config XILLYBUS_PCIE
depends on PCI_MSI
help
Set to M if you want Xillybus to use PCI Express for communicating
- with the FPGA.
+ with the FPGA. The module will be called xillybus_pcie.
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
@@ -29,6 +33,20 @@ config XILLYBUS_OF
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
- system, say M.
+ system, say M. The module will be called xillybus_of.
endif # if XILLYBUS
+
+# XILLYUSB doesn't depend on XILLYBUS
+
+config XILLYUSB
+ tristate "XillyUSB: Xillybus generic FPGA interface for USB"
+ depends on USB
+ select CRC32
+ select XILLYBUS_CLASS
+ help
+ XillyUSB is the Xillybus variant which uses USB for communicating
+ with the FPGA.
+
+ Set to M if you want Xillybus to use USB for communicating with
+ the FPGA. The module will be called xillyusb.
diff --git a/drivers/char/xillybus/Makefile b/drivers/char/xillybus/Makefile
index 099e9a3585fc..16f31d03209d 100644
--- a/drivers/char/xillybus/Makefile
+++ b/drivers/char/xillybus/Makefile
@@ -3,6 +3,8 @@
# Makefile for Xillybus driver
#
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus_class.o
obj-$(CONFIG_XILLYBUS) += xillybus_core.o
obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
+obj-$(CONFIG_XILLYUSB) += xillyusb.o
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
index 8e3ed4d1bb7f..c63ffc56637c 100644
--- a/drivers/char/xillybus/xillybus.h
+++ b/drivers/char/xillybus/xillybus.h
@@ -30,7 +30,8 @@ struct xilly_buffer {
struct xilly_idt_handle {
unsigned char *chandesc;
- unsigned char *idt;
+ unsigned char *names;
+ int names_len;
int entries;
};
@@ -94,7 +95,6 @@ struct xilly_endpoint {
struct device *dev;
struct xilly_endpoint_hardware *ephw;
- struct list_head ep_list;
int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
__iomem void *registers;
int fatal_error;
@@ -102,12 +102,6 @@ struct xilly_endpoint {
struct mutex register_mutex;
wait_queue_head_t ep_wait;
- /* Channels and message handling */
- struct cdev cdev;
-
- int major;
- int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
-
int num_channels; /* EXCLUDING message buffer */
struct xilly_channel **channels;
int msg_counter;
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
new file mode 100644
index 000000000000..5046486011c8
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus class
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for Xillybus class");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_class");
+MODULE_LICENSE("GPL v2");
+
+static DEFINE_MUTEX(unit_mutex);
+static LIST_HEAD(unit_list);
+static struct class *xillybus_class;
+
+#define UNITNAMELEN 16
+
+struct xilly_unit {
+ struct list_head list_entry;
+ void *private_data;
+
+ struct cdev *cdev;
+ char name[UNITNAMELEN];
+ int major;
+ int lowest_minor;
+ int num_nodes;
+};
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate)
+{
+ int rc;
+ dev_t mdev;
+ int i;
+ char devname[48];
+
+ struct device *device;
+ size_t namelen;
+ struct xilly_unit *unit, *u;
+
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+
+ if (!unit)
+ return -ENOMEM;
+
+ mutex_lock(&unit_mutex);
+
+ if (!enumerate)
+ snprintf(unit->name, UNITNAMELEN, "%s", prefix);
+
+ for (i = 0; enumerate; i++) {
+ snprintf(unit->name, UNITNAMELEN, "%s_%02d",
+ prefix, i);
+
+ enumerate = false;
+ list_for_each_entry(u, &unit_list, list_entry)
+ if (!strcmp(unit->name, u->name)) {
+ enumerate = true;
+ break;
+ }
+ }
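+
+	/*
+	 * unit->name now holds either the bare prefix or, when enumerating,
+	 * the lowest free "<prefix>_NN" name: e.g. a second enumerated unit
+	 * registered with prefix "foo" (a placeholder for illustration)
+	 * would be named "foo_01".
+	 */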
+
+ rc = alloc_chrdev_region(&mdev, 0, num_nodes, unit->name);
+
+ if (rc) {
+ dev_warn(dev, "Failed to obtain major/minors");
+ goto fail_obtain;
+ }
+
+ unit->major = MAJOR(mdev);
+ unit->lowest_minor = MINOR(mdev);
+ unit->num_nodes = num_nodes;
+ unit->private_data = private_data;
+
+ unit->cdev = cdev_alloc();
+ if (!unit->cdev) {
+ rc = -ENOMEM;
+ goto unregister_chrdev;
+ }
+ unit->cdev->ops = fops;
+ unit->cdev->owner = owner;
+
+ rc = cdev_add(unit->cdev, MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+ if (rc) {
+ dev_err(dev, "Failed to add cdev.\n");
+ /* kobject_put() is normally done by cdev_del() */
+ kobject_put(&unit->cdev->kobj);
+ goto unregister_chrdev;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ namelen = strnlen(idt, len);
+
+ if (namelen == len) {
+ dev_err(dev, "IDT's list of names is too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ snprintf(devname, sizeof(devname), "%s_%s",
+ unit->name, idt);
+
+ len -= namelen + 1;
+ idt += namelen + 1;
+
+ device = device_create(xillybus_class,
+ NULL,
+ MKDEV(unit->major,
+ i + unit->lowest_minor),
+ NULL,
+ "%s", devname);
+
+ if (IS_ERR(device)) {
+ dev_err(dev, "Failed to create %s device. Aborting.\n",
+ devname);
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+ }
+
+ if (len) {
+ dev_err(dev, "IDT's list of names is too long. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ list_add_tail(&unit->list_entry, &unit_list);
+
+ dev_info(dev, "Created %d device files.\n", num_nodes);
+
+ mutex_unlock(&unit_mutex);
+
+ return 0;
+
+unroll_device_create:
+ for (i--; i >= 0; i--)
+ device_destroy(xillybus_class, MKDEV(unit->major,
+ i + unit->lowest_minor));
+
+ cdev_del(unit->cdev);
+
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+fail_obtain:
+ mutex_unlock(&unit_mutex);
+
+ kfree(unit);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_init_chrdev);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev)
+{
+ int minor;
+ struct xilly_unit *unit;
+ bool found = false;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(unit, &unit_list, list_entry)
+ if (unit->private_data == private_data) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ dev_err(dev, "Weird bug: Failed to find unit\n");
+ mutex_unlock(&unit_mutex);
+ return;
+ }
+
+ for (minor = unit->lowest_minor;
+ minor < (unit->lowest_minor + unit->num_nodes);
+ minor++)
+ device_destroy(xillybus_class, MKDEV(unit->major, minor));
+
+ cdev_del(unit->cdev);
+
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+ dev_info(dev, "Removed %d device files.\n",
+ unit->num_nodes);
+
+ list_del(&unit->list_entry);
+ kfree(unit);
+
+ mutex_unlock(&unit_mutex);
+}
+EXPORT_SYMBOL(xillybus_cleanup_chrdev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index)
+{
+ int minor = iminor(inode);
+ int major = imajor(inode);
+ struct xilly_unit *unit;
+ bool found = false;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(unit, &unit_list, list_entry)
+ if (unit->major == major &&
+ minor >= unit->lowest_minor &&
+ minor < (unit->lowest_minor + unit->num_nodes)) {
+ found = true;
+ break;
+ }
+
+ mutex_unlock(&unit_mutex);
+
+ if (!found)
+ return -ENODEV;
+
+ *private_data = unit->private_data;
+ *index = minor - unit->lowest_minor;
+
+ return 0;
+}
+EXPORT_SYMBOL(xillybus_find_inode);
+
+static int __init xillybus_class_init(void)
+{
+ xillybus_class = class_create(THIS_MODULE, "xillybus");
+
+ if (IS_ERR(xillybus_class)) {
+ pr_warn("Failed to register xillybus class\n");
+
+ return PTR_ERR(xillybus_class);
+ }
+ return 0;
+}
+
+static void __exit xillybus_class_exit(void)
+{
+ class_destroy(xillybus_class);
+}
+
+module_init(xillybus_class_init);
+module_exit(xillybus_class_exit);
diff --git a/drivers/char/xillybus/xillybus_class.h b/drivers/char/xillybus/xillybus_class.h
new file mode 100644
index 000000000000..5dbfdfc95c65
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Xillybus Ltd, http://www.xillybus.com
+ *
+ * Header file for the Xillybus class
+ */
+
+#ifndef __XILLYBUS_CLASS_H
+#define __XILLYBUS_CLASS_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index);
+
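+/*
+ * A minimal usage sketch (illustrative only; my_fops, my_dev and the
+ * name list are placeholders, not symbols of this driver): a transport
+ * module registers its device files on probe, resolves its private data
+ * in its open() method, and unregisters on remove:
+ *
+ *   rc = xillybus_init_chrdev(dev, &my_fops, THIS_MODULE, my_dev,
+ *                             names, names_len, num_nodes,
+ *                             "myprefix", false);
+ *
+ *   rc = xillybus_find_inode(inode, (void **)&my_dev, &index);
+ *
+ *   xillybus_cleanup_chrdev(my_dev, dev);
+ */
+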
+#endif /* __XILLYBUS_CLASS_H */
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 57fa68834981..931d0bf4cec6 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
@@ -30,10 +29,10 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
+#include "xillybus_class.h"
MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.07");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");
@@ -58,16 +57,6 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus";
-static struct class *xillybus_class;
-
-/*
- * ep_list_lock is the last lock to be taken; No other lock requests are
- * allowed while holding it. It merely protects list_of_endpoints, and not
- * the endpoints listed in it.
- */
-
-static LIST_HEAD(list_of_endpoints);
-static struct mutex ep_list_lock;
static struct workqueue_struct *xillybus_wq;
/*
@@ -570,10 +559,8 @@ static int xilly_scan_idt(struct xilly_endpoint *endpoint,
unsigned char *scan;
int len;
- scan = idt;
- idt_handle->idt = idt;
-
- scan++; /* Skip version number */
+ scan = idt + 1;
+ idt_handle->names = scan;
while ((scan <= end_of_idt) && *scan) {
while ((scan <= end_of_idt) && *scan++)
@@ -581,6 +568,8 @@ static int xilly_scan_idt(struct xilly_endpoint *endpoint,
count++;
}
+ idt_handle->names_len = scan - idt_handle->names;
+
scan++;
if (scan > end_of_idt) {
@@ -1407,36 +1396,20 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
static int xillybus_open(struct inode *inode, struct file *filp)
{
- int rc = 0;
+ int rc;
unsigned long flags;
- int minor = iminor(inode);
- int major = imajor(inode);
- struct xilly_endpoint *ep_iter, *endpoint = NULL;
+ struct xilly_endpoint *endpoint;
struct xilly_channel *channel;
+ int index;
- mutex_lock(&ep_list_lock);
-
- list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
- if ((ep_iter->major == major) &&
- (minor >= ep_iter->lowest_minor) &&
- (minor < (ep_iter->lowest_minor +
- ep_iter->num_channels))) {
- endpoint = ep_iter;
- break;
- }
- }
- mutex_unlock(&ep_list_lock);
-
- if (!endpoint) {
- pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
- major, minor);
- return -ENODEV;
- }
+ rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
+ if (rc)
+ return rc;
if (endpoint->fatal_error)
return -EIO;
- channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
+ channel = endpoint->channels[1 + index];
filp->private_data = channel;
/*
@@ -1799,95 +1772,6 @@ static const struct file_operations xillybus_fops = {
.poll = xillybus_poll,
};
-static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
- const unsigned char *idt)
-{
- int rc;
- dev_t dev;
- int devnum, i, minor, major;
- char devname[48];
- struct device *device;
-
- rc = alloc_chrdev_region(&dev, 0, /* minor start */
- endpoint->num_channels,
- xillyname);
- if (rc) {
- dev_warn(endpoint->dev, "Failed to obtain major/minors");
- return rc;
- }
-
- endpoint->major = major = MAJOR(dev);
- endpoint->lowest_minor = minor = MINOR(dev);
-
- cdev_init(&endpoint->cdev, &xillybus_fops);
- endpoint->cdev.owner = endpoint->ephw->owner;
- rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
- endpoint->num_channels);
- if (rc) {
- dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
- goto unregister_chrdev;
- }
-
- idt++;
-
- for (i = minor, devnum = 0;
- devnum < endpoint->num_channels;
- devnum++, i++) {
- snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
-
- devname[sizeof(devname)-1] = 0; /* Should never matter */
-
- while (*idt++)
- /* Skip to next */;
-
- device = device_create(xillybus_class,
- NULL,
- MKDEV(major, i),
- NULL,
- "%s", devname);
-
- if (IS_ERR(device)) {
- dev_warn(endpoint->dev,
- "Failed to create %s device. Aborting.\n",
- devname);
- rc = -ENODEV;
- goto unroll_device_create;
- }
- }
-
- dev_info(endpoint->dev, "Created %d device files.\n",
- endpoint->num_channels);
- return 0; /* succeed */
-
-unroll_device_create:
- devnum--; i--;
- for (; devnum >= 0; devnum--, i--)
- device_destroy(xillybus_class, MKDEV(major, i));
-
- cdev_del(&endpoint->cdev);
-unregister_chrdev:
- unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
-
- return rc;
-}
-
-static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
-{
- int minor;
-
- for (minor = endpoint->lowest_minor;
- minor < (endpoint->lowest_minor + endpoint->num_channels);
- minor++)
- device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
- cdev_del(&endpoint->cdev);
- unregister_chrdev_region(MKDEV(endpoint->major,
- endpoint->lowest_minor),
- endpoint->num_channels);
-
- dev_info(endpoint->dev, "Removed %d device files.\n",
- endpoint->num_channels);
-}
-
struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
struct device *dev,
struct xilly_endpoint_hardware
@@ -2027,28 +1911,20 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
if (rc)
goto failed_idt;
- /*
- * endpoint is now completely configured. We put it on the list
- * available to open() before registering the char device(s)
- */
-
- mutex_lock(&ep_list_lock);
- list_add_tail(&endpoint->ep_list, &list_of_endpoints);
- mutex_unlock(&ep_list_lock);
+ rc = xillybus_init_chrdev(dev, &xillybus_fops,
+ endpoint->ephw->owner, endpoint,
+ idt_handle.names,
+ idt_handle.names_len,
+ endpoint->num_channels,
+ xillyname, false);
- rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
if (rc)
- goto failed_chrdevs;
+ goto failed_idt;
devres_release_group(dev, bootstrap_resources);
return 0;
-failed_chrdevs:
- mutex_lock(&ep_list_lock);
- list_del(&endpoint->ep_list);
- mutex_unlock(&ep_list_lock);
-
failed_idt:
xilly_quiesce(endpoint);
flush_workqueue(xillybus_wq);
@@ -2059,11 +1935,7 @@ EXPORT_SYMBOL(xillybus_endpoint_discovery);
void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
- xillybus_cleanup_chrdev(endpoint);
-
- mutex_lock(&ep_list_lock);
- list_del(&endpoint->ep_list);
- mutex_unlock(&ep_list_lock);
+ xillybus_cleanup_chrdev(endpoint, endpoint->dev);
xilly_quiesce(endpoint);
@@ -2077,17 +1949,9 @@ EXPORT_SYMBOL(xillybus_endpoint_remove);
static int __init xillybus_init(void)
{
- mutex_init(&ep_list_lock);
-
- xillybus_class = class_create(THIS_MODULE, xillyname);
- if (IS_ERR(xillybus_class))
- return PTR_ERR(xillybus_class);
-
xillybus_wq = alloc_workqueue(xillyname, 0, 0);
- if (!xillybus_wq) {
- class_destroy(xillybus_class);
+ if (!xillybus_wq)
return -ENOMEM;
- }
return 0;
}
@@ -2096,8 +1960,6 @@ static void __exit xillybus_exit(void)
{
/* flush_workqueue() was called for each endpoint released */
destroy_workqueue(xillybus_wq);
-
- class_destroy(xillybus_class);
}
module_init(xillybus_init);
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 96b6de8a30e5..1a20b286fd1d 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -17,7 +17,6 @@
MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_of");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index 18b0c392bc93..bdf1c366b4fc 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -14,7 +14,6 @@
MODULE_DESCRIPTION("Xillybus driver for PCIe");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_pcie");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
new file mode 100644
index 000000000000..e7f88f35c702
--- /dev/null
+++ b/drivers/char/xillybus/xillyusb.c
@@ -0,0 +1,2259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the XillyUSB FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillyusb");
+MODULE_LICENSE("GPL v2");
+
+#define XILLY_RX_TIMEOUT (10 * HZ / 1000)
+#define XILLY_RESPONSE_TIMEOUT (500 * HZ / 1000)
+
+#define BUF_SIZE_ORDER 4
+#define BUFNUM 8
+#define LOG2_IDT_FIFO_SIZE 16
+#define LOG2_INITIAL_FIFO_BUF_SIZE 16
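+
+/*
+ * Sizing note (derived from how these constants are used below, assuming
+ * 4 KiB pages): BUF_SIZE_ORDER and BUFNUM give each bulk data endpoint
+ * eight buffers of 2^4 pages, i.e. 64 KiB each; the two LOG2_* values
+ * likewise correspond to 64 KiB.
+ */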
+
+#define MSG_EP_NUM 1
+#define IN_EP_NUM 1
+
+static const char xillyname[] = "xillyusb";
+
+static unsigned int fifo_buf_order;
+
+#define USB_VENDOR_ID_XILINX 0x03fd
+#define USB_VENDOR_ID_ALTERA 0x09fb
+
+#define USB_PRODUCT_ID_XILLYUSB 0xebbe
+
+static const struct usb_device_id xillyusb_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
+ { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, xillyusb_table);
+
+struct xillyusb_dev;
+
+struct xillyfifo {
+ unsigned int bufsize; /* In bytes, always a power of 2 */
+ unsigned int bufnum;
+ unsigned int size; /* Lazy: Equals bufsize * bufnum */
+ unsigned int buf_order;
+
+ int fill; /* Number of bytes in the FIFO */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+
+ unsigned int readpos;
+ unsigned int readbuf;
+ unsigned int writepos;
+ unsigned int writebuf;
+ char **mem;
+};
+
+struct xillyusb_channel;
+
+struct xillyusb_endpoint {
+ struct xillyusb_dev *xdev;
+
+ struct mutex ep_mutex; /* serialize operations on endpoint */
+
+ struct list_head buffers;
+ struct list_head filled_buffers;
+ spinlock_t buffers_lock; /* protect these two lists */
+
+ unsigned int order;
+ unsigned int buffer_size;
+
+ unsigned int fill_mask;
+
+ int outstanding_urbs;
+
+ struct usb_anchor anchor;
+
+ struct xillyfifo fifo;
+
+ struct work_struct workitem;
+
+ bool shutting_down;
+ bool drained;
+ bool wake_on_drain;
+
+ u8 ep_num;
+};
+
+struct xillyusb_channel {
+ struct xillyusb_dev *xdev;
+
+ struct xillyfifo *in_fifo;
+ struct xillyusb_endpoint *out_ep;
+ struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
+
+ struct mutex in_mutex; /* serialize fops on FPGA to host stream */
+ struct mutex out_mutex; /* serialize fops on host to FPGA stream */
+ wait_queue_head_t flushq;
+
+ int chan_idx;
+
+ u32 in_consumed_bytes;
+ u32 in_current_checkpoint;
+ u32 out_bytes;
+
+ unsigned int in_log2_element_size;
+ unsigned int out_log2_element_size;
+ unsigned int in_log2_fifo_size;
+ unsigned int out_log2_fifo_size;
+
+ unsigned int read_data_ok; /* EOF not arrived (yet) */
+ unsigned int poll_used;
+ unsigned int flushing;
+ unsigned int flushed;
+ unsigned int canceled;
+
+ /* Bit fields protected by @lock except for initialization */
+ unsigned readable:1;
+ unsigned writable:1;
+ unsigned open_for_read:1;
+ unsigned open_for_write:1;
+ unsigned in_synchronous:1;
+ unsigned out_synchronous:1;
+ unsigned in_seekable:1;
+ unsigned out_seekable:1;
+};
+
+struct xillybuffer {
+ struct list_head entry;
+ struct xillyusb_endpoint *ep;
+ void *buf;
+ unsigned int len;
+};
+
+struct xillyusb_dev {
+ struct xillyusb_channel *channels;
+
+ struct usb_device *udev;
+ struct device *dev; /* For dev_err() and such */
+ struct kref kref;
+ struct workqueue_struct *workq;
+
+ int error;
+ spinlock_t error_lock; /* protect @error */
+ struct work_struct wakeup_workitem;
+
+ int num_channels;
+
+ struct xillyusb_endpoint *msg_ep;
+ struct xillyusb_endpoint *in_ep;
+
+ struct mutex msg_mutex; /* serialize opcode transmission */
+ int in_bytes_left;
+ int leftover_chan_num;
+ unsigned int in_counter;
+ struct mutex process_in_mutex; /* synchronize wakeup_all() */
+};
+
+/* FPGA to host opcodes */
+enum {
+ OPCODE_DATA = 0,
+ OPCODE_QUIESCE_ACK = 1,
+ OPCODE_EOF = 2,
+ OPCODE_REACHED_CHECKPOINT = 3,
+ OPCODE_CANCELED_CHECKPOINT = 4,
+};
+
+/* Host to FPGA opcodes */
+enum {
+ OPCODE_QUIESCE = 0,
+ OPCODE_REQ_IDT = 1,
+ OPCODE_SET_CHECKPOINT = 2,
+ OPCODE_CLOSE = 3,
+ OPCODE_SET_PUSH = 4,
+ OPCODE_UPDATE_PUSH = 5,
+ OPCODE_CANCEL_CHECKPOINT = 6,
+ OPCODE_SET_ADDR = 7,
+};
+
+/*
+ * fifo_write() and fifo_read() are NOT reentrant (i.e. multiple concurrent
+ * calls to either function on the same FIFO are not allowed). It is OK,
+ * however, to have one thread calling fifo_write() and another calling
+ * fifo_read() on the same FIFO at the same time.
+ */
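+
+/*
+ * For illustration (a sketch, not code used as-is by this driver): the
+ * intended pairing is a single producer and a single consumer, e.g. one
+ * thread filling the FIFO from kernel memory while another drains it to
+ * user space:
+ *
+ *   n = fifo_write(fifo, kbuf, len, xilly_memcpy);
+ *   n = fifo_read(fifo, (__force void *)ubuf, count, xilly_copy_to_user);
+ *
+ * Both return the number of bytes actually transferred, possibly less
+ * than requested, or a negative error code from the copier.
+ */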
+
+static int fifo_write(struct xillyfifo *fifo,
+ const void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int nmax;
+ unsigned int writepos = fifo->writepos;
+ unsigned int writebuf = fifo->writebuf;
+ unsigned long flags;
+ int rc;
+
+ nmax = fifo->size - READ_ONCE(fifo->fill);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - writepos;
+ unsigned int n = min(todo, nmax);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill += done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->writepos = writepos;
+ fifo->writebuf = writebuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ writepos += n;
+ nmax -= n;
+
+ if (writepos == fifo->bufsize) {
+ writepos = 0;
+ writebuf++;
+
+ if (writebuf == fifo->bufnum)
+ writebuf = 0;
+ }
+ }
+}
+
+static int fifo_read(struct xillyfifo *fifo,
+ void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int fill;
+ unsigned int readpos = fifo->readpos;
+ unsigned int readbuf = fifo->readbuf;
+ unsigned long flags;
+ int rc;
+
+ /*
+ * The spinlock here is necessary, because otherwise fifo->fill
+ * could have been increased by fifo_write() after writing data
+	 * to the buffer, while that data is not yet visible to this
+	 * thread, even though the updated fifo->fill already is. That
+	 * could lead to reading invalid data.
+ */
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ fill = fifo->fill;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - readpos;
+ unsigned int n = min(todo, fill);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill -= done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->readpos = readpos;
+ fifo->readbuf = readbuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ readpos += n;
+ fill -= n;
+
+ if (readpos == fifo->bufsize) {
+ readpos = 0;
+ readbuf++;
+
+ if (readbuf == fifo->bufnum)
+ readbuf = 0;
+ }
+ }
+}
+
+/*
+ * These three wrapper functions are used as the @copier argument to
+ * fifo_write() and fifo_read(), so that they can work directly with
+ * user memory as well.
+ */
+
+static int xilly_copy_from_user(void *dst, const void *src, int n)
+{
+ if (copy_from_user(dst, (const void __user *)src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_copy_to_user(void *dst, const void *src, int n)
+{
+ if (copy_to_user((void __user *)dst, src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_memcpy(void *dst, const void *src, int n)
+{
+ memcpy(dst, src, n);
+
+ return 0;
+}
+
+static int fifo_init(struct xillyfifo *fifo,
+ unsigned int log2_size)
+{
+ unsigned int log2_bufnum;
+ unsigned int buf_order;
+ int i;
+
+ unsigned int log2_fifo_buf_size;
+
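+	/*
+	 * Worked example, assuming 4 KiB pages and fifo_buf_order == 4
+	 * (so log2_fifo_buf_size == 16): a request for a 1 MiB FIFO
+	 * (log2_size == 20) yields 16 buffers of 64 KiB each. If an
+	 * allocation fails, fifo_buf_order is decremented and the whole
+	 * computation is retried with smaller buffers.
+	 */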
+retry:
+ log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
+
+ if (log2_size > log2_fifo_buf_size) {
+ log2_bufnum = log2_size - log2_fifo_buf_size;
+ buf_order = fifo_buf_order;
+ fifo->bufsize = 1 << log2_fifo_buf_size;
+ } else {
+ log2_bufnum = 0;
+ buf_order = (log2_size > PAGE_SHIFT) ?
+ log2_size - PAGE_SHIFT : 0;
+ fifo->bufsize = 1 << log2_size;
+ }
+
+ fifo->bufnum = 1 << log2_bufnum;
+ fifo->size = fifo->bufnum * fifo->bufsize;
+ fifo->buf_order = buf_order;
+
+ fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
+
+ if (!fifo->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < fifo->bufnum; i++) {
+ fifo->mem[i] = (void *)
+ __get_free_pages(GFP_KERNEL, buf_order);
+
+ if (!fifo->mem[i])
+ goto memfail;
+ }
+
+ fifo->fill = 0;
+ fifo->readpos = 0;
+ fifo->readbuf = 0;
+ fifo->writepos = 0;
+ fifo->writebuf = 0;
+ spin_lock_init(&fifo->lock);
+ init_waitqueue_head(&fifo->waitq);
+ return 0;
+
+memfail:
+ for (i--; i >= 0; i--)
+ free_pages((unsigned long)fifo->mem[i], buf_order);
+
+ kfree(fifo->mem);
+ fifo->mem = NULL;
+
+ if (fifo_buf_order) {
+ fifo_buf_order--;
+ goto retry;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+static void fifo_mem_release(struct xillyfifo *fifo)
+{
+ int i;
+
+ if (!fifo->mem)
+ return;
+
+ for (i = 0; i < fifo->bufnum; i++)
+ free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
+
+ kfree(fifo->mem);
+}
+
+/*
+ * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
+ * won't accept any new URB submissions, and its related work item doesn't
+ * and won't run anymore.
+ */
+
+static void endpoint_quiesce(struct xillyusb_endpoint *ep)
+{
+ mutex_lock(&ep->ep_mutex);
+ ep->shutting_down = true;
+ mutex_unlock(&ep->ep_mutex);
+
+ usb_kill_anchored_urbs(&ep->anchor);
+ cancel_work_sync(&ep->workitem);
+}
+
+/*
+ * Note that endpoint_dealloc() also frees FIFO memory (if allocated), even
+ * though endpoint_alloc() doesn't allocate that memory.
+ */
+
+static void endpoint_dealloc(struct xillyusb_endpoint *ep)
+{
+ struct list_head *this, *next;
+
+ fifo_mem_release(&ep->fifo);
+
+ /* Join @filled_buffers with @buffers to free these entries too */
+ list_splice(&ep->filled_buffers, &ep->buffers);
+
+ list_for_each_safe(this, next, &ep->buffers) {
+ struct xillybuffer *xb =
+ list_entry(this, struct xillybuffer, entry);
+
+ free_pages((unsigned long)xb->buf, ep->order);
+ kfree(xb);
+ }
+
+ kfree(ep);
+}
+
+static struct xillyusb_endpoint
+*endpoint_alloc(struct xillyusb_dev *xdev,
+ u8 ep_num,
+ void (*work)(struct work_struct *),
+ unsigned int order,
+ int bufnum)
+{
+ int i;
+
+ struct xillyusb_endpoint *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+
+ if (!ep)
+ return NULL;
+
+ INIT_LIST_HEAD(&ep->buffers);
+ INIT_LIST_HEAD(&ep->filled_buffers);
+
+ spin_lock_init(&ep->buffers_lock);
+ mutex_init(&ep->ep_mutex);
+
+ init_usb_anchor(&ep->anchor);
+ INIT_WORK(&ep->workitem, work);
+
+ ep->order = order;
+ ep->buffer_size = 1 << (PAGE_SHIFT + order);
+ ep->outstanding_urbs = 0;
+ ep->drained = true;
+ ep->wake_on_drain = false;
+ ep->xdev = xdev;
+ ep->ep_num = ep_num;
+ ep->shutting_down = false;
+
+ for (i = 0; i < bufnum; i++) {
+ struct xillybuffer *xb;
+ unsigned long addr;
+
+ xb = kzalloc(sizeof(*xb), GFP_KERNEL);
+
+ if (!xb) {
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ addr = __get_free_pages(GFP_KERNEL, order);
+
+ if (!addr) {
+ kfree(xb);
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ xb->buf = (void *)addr;
+ xb->ep = ep;
+ list_add_tail(&xb->entry, &ep->buffers);
+ }
+ return ep;
+}
+
+static void cleanup_dev(struct kref *kref)
+{
+ struct xillyusb_dev *xdev =
+ container_of(kref, struct xillyusb_dev, kref);
+
+ if (xdev->in_ep)
+ endpoint_dealloc(xdev->in_ep);
+
+ if (xdev->msg_ep)
+ endpoint_dealloc(xdev->msg_ep);
+
+ if (xdev->workq)
+ destroy_workqueue(xdev->workq);
+
+ kfree(xdev->channels); /* Argument may be NULL, and that's fine */
+ kfree(xdev);
+}
+
+/*
+ * @process_in_mutex is taken to ensure that bulk_in_work() won't call
+ * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
+ * @read_data_ok entries, which will make process_bulk_in() report false
+ * errors if executed. The mechanism relies on the fact that xdev->error
+ * is assigned a non-zero value by report_io_error() prior to queueing
+ * wakeup_all(), which prevents bulk_in_work() from calling
+ * process_bulk_in().
+ *
+ * The fact that wakeup_all() and bulk_in_work() are queued on the same
+ * workqueue makes their concurrent execution very unlikely, however the
+ * kernel's API doesn't seem to ensure this strictly.
+ */
+
+static void wakeup_all(struct work_struct *work)
+{
+ int i;
+ struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
+ wakeup_workitem);
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ mutex_lock(&chan->lock);
+
+ if (chan->in_fifo) {
+ /*
+ * Fake an EOF: Even if such arrives, it won't be
+ * processed.
+ */
+ chan->read_data_ok = 0;
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ }
+
+ if (chan->out_ep)
+ wake_up_interruptible(&chan->out_ep->fifo.waitq);
+
+ mutex_unlock(&chan->lock);
+
+ wake_up_interruptible(&chan->flushq);
+ }
+
+ mutex_unlock(&xdev->process_in_mutex);
+
+ wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
+
+ kref_put(&xdev->kref, cleanup_dev);
+}
+
+static void report_io_error(struct xillyusb_dev *xdev,
+ int errcode)
+{
+ unsigned long flags;
+ bool do_once = false;
+
+ spin_lock_irqsave(&xdev->error_lock, flags);
+ if (!xdev->error) {
+ xdev->error = errcode;
+ do_once = true;
+ }
+ spin_unlock_irqrestore(&xdev->error_lock, flags);
+
+ if (do_once) {
+ kref_get(&xdev->kref); /* xdev is used by work item */
+ queue_work(xdev->workq, &xdev->wakeup_workitem);
+ }
+}
+
+/*
+ * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
+ * the previous pointer is never used after its return.
+ */
+
+static void safely_assign_in_fifo(struct xillyusb_channel *chan,
+ struct xillyfifo *fifo)
+{
+ mutex_lock(&chan->lock);
+ chan->in_fifo = fifo;
+ mutex_unlock(&chan->lock);
+
+ flush_work(&chan->xdev->in_ep->workitem);
+}
+
+static void bulk_in_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ return;
+ }
+
+ xb->len = urb->actual_length;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->filled_buffers);
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void bulk_out_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status &&
+ (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
+{
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned long flags;
+ unsigned int bufsize = ep->buffer_size;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ while (1) {
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ if (list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_rcvbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, bufsize, bulk_in_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+}
+
+static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
+{
+ struct xillyfifo *fifo = &ep->fifo;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned int fill;
+ unsigned long flags;
+ bool do_wake = false;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ fill = READ_ONCE(fifo->fill) & ep->fill_mask;
+
+ while (1) {
+ int count;
+ unsigned int max_read;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ /*
+ * Race conditions might have the FIFO filled while the
+ * endpoint is marked as drained here. That doesn't matter,
+ * because the sole purpose of @drained is to ensure that
+ * certain data has been sent on the USB channel before
+ * shutting it down. Hence knowing that the FIFO appears
+ * to be empty with no outstanding URBs at some moment
+ * is good enough.
+ */
+
+ if (!fill) {
+ ep->drained = !ep->outstanding_urbs;
+ if (ep->drained && ep->wake_on_drain)
+ do_wake = true;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ ep->drained = false;
+
+ if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
+ list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ max_read = min(fill, ep->buffer_size);
+
+ count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
+
+ /*
+ * xilly_memcpy always returns 0 => fifo_read can't fail =>
+ * count > 0
+ */
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_sndbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, count, bulk_out_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+
+ fill -= count;
+ do_wake = true;
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+
+ if (do_wake)
+ wake_up_interruptible(&fifo->waitq);
+}
+
+static void bulk_out_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep = container_of(work,
+ struct xillyusb_endpoint,
+ workitem);
+ try_queue_bulk_out(ep);
+}
+
+static int process_in_opcode(struct xillyusb_dev *xdev,
+ int opcode,
+ int chan_num)
+{
+ struct xillyusb_channel *chan;
+ struct device *dev = xdev->dev;
+ int chan_idx = chan_num >> 1;
+
+ if (chan_idx >= xdev->num_channels) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+
+ chan = &xdev->channels[chan_idx];
+
+ switch (opcode) {
+ case OPCODE_EOF:
+ if (!chan->read_data_ok) {
+ dev_err(dev, "Received unexpected EOF for channel %d\n",
+ chan_num);
+ return -EIO;
+ }
+
+ /*
+ * A write memory barrier ensures that the FIFO's fill level
+ * is visible before read_data_ok turns zero, so the data in
+ * the FIFO isn't missed by the consumer.
+ */
+ smp_wmb();
+ WRITE_ONCE(chan->read_data_ok, 0);
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ break;
+
+ case OPCODE_REACHED_CHECKPOINT:
+ chan->flushing = 0;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ case OPCODE_CANCELED_CHECKPOINT:
+ chan->canceled = 1;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ default:
+ dev_err(dev, "Received illegal opcode %d from FPGA\n",
+ opcode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int process_bulk_in(struct xillybuffer *xb)
+{
+ struct xillyusb_endpoint *ep = xb->ep;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct device *dev = xdev->dev;
+ int dws = xb->len >> 2;
+ __le32 *p = xb->buf;
+ u32 ctrlword;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *fifo;
+ int chan_num = 0, opcode;
+ int chan_idx;
+ int bytes, count, dwconsume;
+ int in_bytes_left = 0;
+ int rc;
+
+ if ((dws << 2) != xb->len) {
+ dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
+ xb->len);
+ return -EIO;
+ }
+
+ if (xdev->in_bytes_left) {
+ bytes = min(xdev->in_bytes_left, dws << 2);
+ in_bytes_left = xdev->in_bytes_left - bytes;
+ chan_num = xdev->leftover_chan_num;
+ goto resume_leftovers;
+ }
+
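+	/*
+	 * Each control word, as parsed below: bits 11:0 carry the channel
+	 * number, bits 21:12 a count (the payload length minus one for
+	 * OPCODE_DATA, a rolling message counter for all other opcodes),
+	 * and bits 27:24 the opcode.
+	 */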
+ while (dws) {
+ ctrlword = le32_to_cpu(*p++);
+ dws--;
+
+ chan_num = ctrlword & 0xfff;
+ count = (ctrlword >> 12) & 0x3ff;
+ opcode = (ctrlword >> 24) & 0xf;
+
+ if (opcode != OPCODE_DATA) {
+ unsigned int in_counter = xdev->in_counter++ & 0x3ff;
+
+ if (count != in_counter) {
+ dev_err(dev, "Expected opcode counter %d, got %d\n",
+ in_counter, count);
+ return -EIO;
+ }
+
+ rc = process_in_opcode(xdev, opcode, chan_num);
+
+ if (rc)
+ return rc;
+
+ continue;
+ }
+
+ bytes = min(count + 1, dws << 2);
+ in_bytes_left = count + 1 - bytes;
+
+resume_leftovers:
+ chan_idx = chan_num >> 1;
+
+ if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
+ !xdev->channels[chan_idx].read_data_ok) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+ chan = &xdev->channels[chan_idx];
+
+ fifo = chan->in_fifo;
+
+ if (unlikely(!fifo))
+ return -EIO; /* We got really unexpected data */
+
+ if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
+ dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
+ return -EIO;
+ }
+
+ wake_up_interruptible(&fifo->waitq);
+
+ dwconsume = (bytes + 3) >> 2;
+ dws -= dwconsume;
+ p += dwconsume;
+ }
+
+ xdev->in_bytes_left = in_bytes_left;
+ xdev->leftover_chan_num = chan_num;
+ return 0;
+}
+
+static void bulk_in_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep =
+ container_of(work, struct xillyusb_endpoint, workitem);
+ struct xillyusb_dev *xdev = ep->xdev;
+ unsigned long flags;
+ struct xillybuffer *xb;
+ bool consumed = false;
+ int rc = 0;
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ while (1) {
+ if (rc || list_empty(&ep->filled_buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ mutex_unlock(&xdev->process_in_mutex);
+
+ if (rc)
+ report_io_error(xdev, rc);
+ else if (consumed)
+ try_queue_bulk_in(ep);
+
+ return;
+ }
+
+ xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
+ entry);
+ list_del(&xb->entry);
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ consumed = true;
+
+ if (!xdev->error)
+ rc = process_bulk_in(xb);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ }
+}
+
+static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
+ int chan_num, char opcode, u32 data)
+{
+ struct xillyusb_endpoint *ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &ep->fifo;
+ __le32 msg[2];
+
+ int rc = 0;
+
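+	/*
+	 * An opcode message is two little-endian 32-bit words: the first
+	 * combines the channel number (bits 11:0) with the opcode (bits
+	 * 27:24), and the second carries opcode-specific data.
+	 */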
+ msg[0] = cpu_to_le32((chan_num & 0xfff) |
+ ((opcode & 0xf) << 24));
+ msg[1] = cpu_to_le32(data);
+
+ mutex_lock(&xdev->msg_mutex);
+
+ /*
+	 * The wait queue is woken with the interruptible variant, so the
+	 * wait function matches; however, returning because of an interrupt
+	 * will mess things up considerably, in particular when the caller is
+	 * the release method. The xdev->error part prevents being stuck
+	 * forever in the event of a bizarre hardware bug: Pulling the USB
+	 * plug breaks the loop.
+ */
+
+ while (wait_event_interruptible(fifo->waitq,
+ fifo->fill <= (fifo->size - 8) ||
+ xdev->error))
+ ; /* Empty loop */
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unlock_done;
+ }
+
+ fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
+
+ try_queue_bulk_out(ep);
+
+unlock_done:
+ mutex_unlock(&xdev->msg_mutex);
+
+ return rc;
+}
+
+/*
+ * Note that flush_downstream() merely waits for the data to arrive at
+ * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
+ * it does nothing to make that happen (nor is that necessary).
+ *
+ * This function is not reentrant for the same @chan, but this is covered
+ * by the fact that, for any given @chan, it's called only by the open,
+ * write, llseek and flush fops methods, which can't run in parallel (and
+ * the write + flush and llseek method handlers are protected with
+ * out_mutex).
+ *
+ * chan->flushed is there to avoid multiple flushes at the same position,
+ * in particular as a result of programs that close the file descriptor
+ * e.g. after a dup2() for redirection.
+ */
+
+static int flush_downstream(struct xillyusb_channel *chan,
+ long timeout,
+ bool interruptible)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ int chan_num = chan->chan_idx << 1;
+ long deadline, left_to_sleep;
+ int rc;
+
+ if (chan->flushed)
+ return 0;
+
+ deadline = jiffies + 1 + timeout;
+
+ if (chan->flushing) {
+ long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
+
+ chan->canceled = 0;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_CANCEL_CHECKPOINT, 0);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ /* Ignoring interrupts. Cancellation must be handled */
+ while (!chan->canceled) {
+ left_to_sleep = cancel_deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0) {
+ report_io_error(xdev, -EIO);
+ return -EIO;
+ }
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ chan->canceled ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+ }
+ }
+
+ chan->flushing = 1;
+
+ /*
+ * The checkpoint is given in terms of data elements, not bytes. As
+ * a result, if less than an element's worth of data is stored in the
+ * FIFO, it's not flushed, including the flush before closing, which
+ * means that such data is lost. This is consistent with PCIe Xillybus.
+ */
+
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ chan->out_bytes >>
+ chan->out_log2_element_size);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ if (!timeout) {
+ while (chan->flushing) {
+ rc = wait_event_interruptible(chan->flushq,
+ !chan->flushing ||
+ xdev->error);
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc)
+ return -EINTR;
+ }
+
+ goto done;
+ }
+
+ while (chan->flushing) {
+ left_to_sleep = deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0)
+ return -ETIMEDOUT;
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ !chan->flushing ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc < 0)
+ return -EINTR;
+ }
+
+done:
+ chan->flushed = 1;
+ return 0;
+}
+
+/* request_read_anything(): Ask the FPGA for any little amount of data */
+static int request_read_anything(struct xillyusb_channel *chan,
+ char opcode)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ unsigned int sh = chan->in_log2_element_size;
+ int chan_num = (chan->chan_idx << 1) | 1;
+ u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
+
+ return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
+}
+
+static int xillyusb_open(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_dev *xdev;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *in_fifo = NULL;
+ struct xillyusb_endpoint *out_ep = NULL;
+ int rc;
+ int index;
+
+ rc = xillybus_find_inode(inode, (void **)&xdev, &index);
+ if (rc)
+ return rc;
+
+ chan = &xdev->channels[index];
+ filp->private_data = chan;
+
+ mutex_lock(&chan->lock);
+
+ rc = -ENODEV;
+
+ if (xdev->error)
+ goto unmutex_fail;
+
+ if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
+ ((filp->f_mode & FMODE_WRITE) && !chan->writable))
+ goto unmutex_fail;
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
+ chan->in_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for read on this device\n");
+ goto unmutex_fail;
+ }
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
+ chan->out_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for write on this device\n");
+ goto unmutex_fail;
+ }
+
+ rc = -EBUSY;
+
+ if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
+ ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
+ goto unmutex_fail;
+
+ kref_get(&xdev->kref);
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 1;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 1;
+
+ mutex_unlock(&chan->lock);
+
+ if (filp->f_mode & FMODE_WRITE) {
+ out_ep = endpoint_alloc(xdev,
+ (chan->chan_idx + 2) | USB_DIR_OUT,
+ bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
+
+ if (!out_ep) {
+ rc = -ENOMEM;
+ goto unopen;
+ }
+
+ rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
+
+ if (rc)
+ goto late_unopen;
+
+ out_ep->fill_mask = -(1 << chan->out_log2_element_size);
+ chan->out_bytes = 0;
+ chan->flushed = 0;
+
+ /*
+ * Sending a flush request to a previously closed stream
+ * effectively opens it, and also waits until the command is
+ * confirmed by the FPGA. The latter is necessary because the
+ * data is sent through a separate BULK OUT endpoint, and the
+ * xHCI controller is free to reorder transmissions.
+ *
+ * This can't go wrong unless there's a serious hardware error
+ * (or the computer is stuck for 500 ms?)
+ */
+ rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
+
+ if (rc == -ETIMEDOUT) {
+ rc = -EIO;
+ report_io_error(xdev, rc);
+ }
+
+ if (rc)
+ goto late_unopen;
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
+
+ if (!in_fifo) {
+ rc = -ENOMEM;
+ goto late_unopen;
+ }
+
+ rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
+
+ if (rc) {
+ kfree(in_fifo);
+ goto late_unopen;
+ }
+ }
+
+ mutex_lock(&chan->lock);
+ if (in_fifo) {
+ chan->in_fifo = in_fifo;
+ chan->read_data_ok = 1;
+ }
+ if (out_ep)
+ chan->out_ep = out_ep;
+ mutex_unlock(&chan->lock);
+
+ if (in_fifo) {
+ u32 in_checkpoint = 0;
+
+ if (!chan->in_synchronous)
+ in_checkpoint = in_fifo->size >>
+ chan->in_log2_element_size;
+
+ chan->in_consumed_bytes = 0;
+ chan->poll_used = 0;
+ chan->in_current_checkpoint = in_checkpoint;
+ rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_SET_CHECKPOINT,
+ in_checkpoint);
+
+ if (rc) /* Failure guarantees that opcode wasn't sent */
+ goto unfifo;
+
+ /*
+ * In non-blocking mode, request the FPGA to send any data it
+ * has right away. Otherwise, the first read() will always
+ * return -EAGAIN, which is OK strictly speaking, but ugly.
+ * Checking and unrolling if this fails isn't worth the
+ * effort -- the error is propagated to the first read()
+ * anyhow.
+ */
+ if (filp->f_flags & O_NONBLOCK)
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ return 0;
+
+unfifo:
+ chan->read_data_ok = 0;
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ if (out_ep) {
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+ }
+
+late_unopen:
+ if (out_ep)
+ endpoint_dealloc(out_ep);
+
+unopen:
+ mutex_lock(&chan->lock);
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 0;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 0;
+
+ mutex_unlock(&chan->lock);
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc;
+
+unmutex_fail:
+ mutex_unlock(&chan->lock);
+ return rc;
+}
+
+static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = chan->in_fifo;
+ int chan_num = (chan->chan_idx << 1) | 1;
+
+ long deadline, left_to_sleep;
+ int bytes_done = 0;
+ bool sent_set_push = false;
+ int rc;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&chan->in_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
+ u32 complete_checkpoint, fifo_checkpoint;
+ u32 checkpoint;
+ s32 diff, leap;
+ unsigned int sh = chan->in_log2_element_size;
+ bool checkpoint_for_complete;
+
+ rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
+ count - bytes_done, xilly_copy_to_user);
+
+ if (rc < 0)
+ break;
+
+ bytes_done += rc;
+ chan->in_consumed_bytes += rc;
+
+ left_to_sleep = deadline - ((long)jiffies);
+
+ /*
+ * Some 32-bit arithmetic that may wrap. Note that
+ * complete_checkpoint is rounded up to the closest element
+ * boundary, because the read() can't be completed otherwise.
+ * fifo_checkpoint_bytes is rounded down, because it protects
+ * in_fifo from overflowing.
+ */
+
+ fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
+ complete_checkpoint_bytes =
+ chan->in_consumed_bytes + count - bytes_done;
+
+ fifo_checkpoint = fifo_checkpoint_bytes >> sh;
+ complete_checkpoint =
+ (complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
+
+ diff = (fifo_checkpoint - complete_checkpoint) << sh;
+
+ if (chan->in_synchronous && diff >= 0) {
+ checkpoint = complete_checkpoint;
+ checkpoint_for_complete = true;
+ } else {
+ checkpoint = fifo_checkpoint;
+ checkpoint_for_complete = false;
+ }
+
+ leap = (checkpoint - chan->in_current_checkpoint) << sh;
+
+ /*
+ * To prevent flooding of OPCODE_SET_CHECKPOINT commands as
+ * data is consumed, it's issued only if it moves the
+ * checkpoint by at least an 8th of the FIFO's size, or if
+ * it's necessary to complete the number of bytes requested by
+ * the read() call.
+ *
+ * chan->read_data_ok is checked to spare an unnecessary
+		 * submission after receiving EOF; however, it's harmless
+		 * if one slips through.
+ */
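+		/*
+		 * For example, with a 64 KiB in_fifo, the checkpoint would
+		 * be updated only after moving by more than 8 KiB (an 8th
+		 * of the FIFO), unless a smaller move completes this
+		 * read().
+		 */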
+
+ if (chan->read_data_ok &&
+ (leap > (fifo->size >> 3) ||
+ (checkpoint_for_complete && leap > 0))) {
+ chan->in_current_checkpoint = checkpoint;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ checkpoint);
+
+ if (rc)
+ break;
+ }
+
+ if (bytes_done == count ||
+ (left_to_sleep <= 0 && bytes_done))
+ break;
+
+ /*
+ * Reaching here means that the FIFO was empty when
+ * fifo_read() returned, but not necessarily right now. Error
+ * and EOF are checked and reported only now, so that no data
+ * that managed its way to the FIFO is lost.
+ */
+
+ if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
+ /* Has data slipped into the FIFO since fifo_read()? */
+ smp_rmb();
+ if (READ_ONCE(fifo->fill))
+ continue;
+
+ rc = 0;
+ break;
+ }
+
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (!sent_set_push) {
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_PUSH,
+ complete_checkpoint);
+
+ if (rc)
+ break;
+
+ sent_set_push = true;
+ }
+
+ if (left_to_sleep > 0) {
+ /*
+ * Note that when xdev->error is set (e.g. when the
+ * device is unplugged), read_data_ok turns zero and
+			 * fifo->waitq is awakened. Therefore no special
+			 * attention to xdev->error is needed here.
+ */
+
+ rc = wait_event_interruptible_timeout
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok,
+ left_to_sleep);
+ } else { /* bytes_done == 0 */
+ /* Tell FPGA to send anything it has */
+ rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
+
+ if (rc)
+ break;
+
+ rc = wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok);
+ }
+
+ if (rc < 0) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
+ !READ_ONCE(fifo->fill))
+ request_read_anything(chan, OPCODE_SET_PUSH);
+
+ mutex_unlock(&chan->in_mutex);
+
+ if (bytes_done)
+ return bytes_done;
+
+ return rc;
+}
+
+static int xillyusb_flush(struct file *filp, fl_owner_t id)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ int rc;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ /*
+	 * One second's timeout on flushing. Signals are ignored, because if
+	 * the user pressed CTRL-C, that signal will still be pending by the
+	 * time we reach here, and the opportunity to flush would be lost.
+ */
+ rc = flush_downstream(chan, HZ, false);
+
+ mutex_unlock(&chan->out_mutex);
+
+ if (rc == -ETIMEDOUT) {
+ /* The things you do to use dev_warn() and not pr_warn() */
+ struct xillyusb_dev *xdev = chan->xdev;
+
+ mutex_lock(&chan->lock);
+ if (!xdev->error)
+ dev_warn(xdev->dev,
+ "Timed out while flushing. Output data may be lost.\n");
+ mutex_unlock(&chan->lock);
+ }
+
+ return rc;
+}
+
+static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = &chan->out_ep->fifo;
+ int rc;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (count == 0)
+ break;
+
+ rc = fifo_write(fifo, (__force void *)userbuf, count,
+ xilly_copy_from_user);
+
+ if (rc != 0)
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill != fifo->size || xdev->error)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (rc < 0)
+ goto done;
+
+ chan->out_bytes += rc;
+
+ if (rc) {
+ try_queue_bulk_out(chan->out_ep);
+ chan->flushed = 0;
+ }
+
+ if (chan->out_synchronous) {
+ int flush_rc = flush_downstream(chan, 0, true);
+
+ if (flush_rc && !rc)
+ rc = flush_rc;
+ }
+
+done:
+ mutex_unlock(&chan->out_mutex);
+
+ return rc;
+}
+
+static int xillyusb_release(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ int rc_read = 0, rc_write = 0;
+
+ if (filp->f_mode & FMODE_READ) {
+ struct xillyfifo *in_fifo = chan->in_fifo;
+
+ rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_CLOSE, 0);
+ /*
+ * If rc_read is nonzero, xdev->error indicates a global
+ * device error. The error is reported later, so that
+ * resources are freed.
+ *
+ * Looping on wait_event_interruptible() kinda breaks the idea
+ * of being interruptible, and this should have been
+		 * wait_event(). Only the queue is woken with
+		 * wake_up_interruptible() for the sake of other uses. If
+		 * there's a global device error, chan->read_data_ok is
+		 * deasserted and the wait queue is awakened, so this is
+		 * covered.
+ */
+
+ while (wait_event_interruptible(in_fifo->waitq,
+ !chan->read_data_ok))
+ ; /* Empty loop */
+
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_read = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ struct xillyusb_endpoint *ep = chan->out_ep;
+ /*
+ * chan->flushing isn't zeroed. If the pre-release flush timed
+ * out, a cancel request will be sent before the next
+ * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
+ * This is despite that the FPGA forgets about the checkpoint
+ * request as the file closes. Still, in an exceptional race
+ * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
+ * just before closing that would reach the host after the
+ * file has re-opened.
+ */
+
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+
+ endpoint_quiesce(ep);
+ endpoint_dealloc(ep);
+
+ /* See comments on rc_read above */
+ rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_CLOSE, 0);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_write = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc_read ? rc_read : rc_write;
+}
+
+/*
+ * Xillybus' API allows device nodes to be seekable, giving the user
+ * application access to a RAM array on the FPGA (or logic emulating it).
+ */
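+
+/*
+ * For instance, from user space (a sketch; the device file name is a
+ * made-up example): reading four bytes at address 0x100 of the FPGA's
+ * RAM array could look like
+ *
+ *   fd = open("/dev/xillyusb_00_mem", O_RDWR);
+ *   lseek(fd, 0x100, SEEK_SET);
+ *   read(fd, buf, 4);
+ *
+ * with 0x100 required to be a multiple of the channel's element size.
+ */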
+
+static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+ unsigned int log2_element_size = chan->readable ?
+ chan->in_log2_element_size : chan->out_log2_element_size;
+
+ /*
+ * Take both mutexes not allowing interrupts, since it seems like
+ * common applications don't expect an -EINTR here. Besides, multiple
+ * access to a single file descriptor on seekable devices is a mess
+ * anyhow.
+ */
+
+ mutex_lock(&chan->out_mutex);
+ mutex_lock(&chan->in_mutex);
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = offset;
+ break;
+ case SEEK_CUR:
+ pos += offset;
+ break;
+ case SEEK_END:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_SET_ADDR,
+ pos >> log2_element_size);
+
+ if (rc)
+ goto end;
+
+ if (chan->writable) {
+ chan->flushed = 0;
+ rc = flush_downstream(chan, HZ, false);
+ }
+
+end:
+ mutex_unlock(&chan->out_mutex);
+ mutex_unlock(&chan->in_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+ return pos;
+}
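+
+/*
+ * For illustration only: a minimal user-space sketch of seekable access,
+ * assuming a hypothetical device node /dev/xillyusb_mem with 4-byte
+ * elements:
+ *
+ *   int fd = open("/dev/xillyusb_mem", O_RDWR);
+ *   lseek(fd, 16 * 4, SEEK_SET);  // element-aligned, or -EINVAL
+ *   read(fd, buf, sizeof(buf));   // access starts at element 16
+ *
+ * As the switch above implies, SEEK_END rewinds to the beginning.
+ */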
+
+static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ __poll_t mask = 0;
+
+ if (chan->in_fifo)
+ poll_wait(filp, &chan->in_fifo->waitq, wait);
+
+ if (chan->out_ep)
+ poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
+
+ /*
+ * If this is the first time poll() is called, and the file is
+ * readable, set the relevant flag. Also tell the FPGA to send all it
+ * has, to kickstart the mechanism that ensures there's always some
+ * data in in_fifo unless the stream is dry end-to-end. Note that the
+	 * first poll() may not return EPOLLIN, even if there's data on the
+	 * FPGA. Rather, the data will arrive soon and trigger the relevant
+	 * wait queue.
+ */
+
+ if (!chan->poll_used && chan->in_fifo) {
+ chan->poll_used = 1;
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ /*
+	 * poll() doesn't work properly on read() channels that are
+	 * synchronous: allowing it would create situations where data has
+	 * been delivered at the FPGA, the user expects select() to wake up,
+	 * and yet it may never do so. So never report EPOLLIN for these
+	 * channels.
+ */
+
+ if (chan->in_fifo && !chan->in_synchronous &&
+ (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (chan->out_ep &&
+ (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ if (chan->xdev->error)
+ mask |= EPOLLERR;
+
+ return mask;
+}
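+
+/*
+ * For illustration only: a user-space sketch of waiting for data, with
+ * fd an open file descriptor on a hypothetical asynchronous channel:
+ *
+ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *   poll(&pfd, 1, -1);  // the first call also kickstarts the push flow
+ *   if (pfd.revents & POLLIN)
+ *           read(fd, buf, sizeof(buf));
+ *
+ * As noted above, synchronous read() channels never report EPOLLIN.
+ */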
+
+static const struct file_operations xillyusb_fops = {
+ .owner = THIS_MODULE,
+ .read = xillyusb_read,
+ .write = xillyusb_write,
+ .open = xillyusb_open,
+ .flush = xillyusb_flush,
+ .release = xillyusb_release,
+ .llseek = xillyusb_llseek,
+ .poll = xillyusb_poll,
+};
+
+static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
+{
+ xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
+ bulk_out_work, 1, 2);
+ if (!xdev->msg_ep)
+ return -ENOMEM;
+
+ if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
+ goto dealloc;
+
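+	/*
+	 * fill_mask = -8 is ~7 in two's complement: a fill level ANDed
+	 * with it rounds down to a multiple of 8, so that only whole
+	 * 8-byte messages are submitted on this endpoint.
+	 */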
+ xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
+
+ xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
+ bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
+ if (!xdev->in_ep)
+ goto dealloc;
+
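+	/* Queue bulk IN URBs so that device-to-host data can start flowing */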
+ try_queue_bulk_in(xdev->in_ep);
+
+ return 0;
+
+dealloc:
+ endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
+ return -ENOMEM;
+}
+
+static int setup_channels(struct xillyusb_dev *xdev,
+ __le16 *chandesc,
+ int num_channels)
+{
+ struct xillyusb_channel *chan;
+ int i;
+
+ chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ xdev->channels = chan;
+
+ for (i = 0; i < num_channels; i++, chan++) {
+ unsigned int in_desc = le16_to_cpu(*chandesc++);
+ unsigned int out_desc = le16_to_cpu(*chandesc++);
+
+ chan->xdev = xdev;
+ mutex_init(&chan->in_mutex);
+ mutex_init(&chan->out_mutex);
+ mutex_init(&chan->lock);
+ init_waitqueue_head(&chan->flushq);
+
+ chan->chan_idx = i;
+
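+		/*
+		 * Each 16-bit descriptor, as decoded below: bit 7 marks the
+		 * entry valid, bit 6 selects synchronous mode, bit 5 makes
+		 * the channel seekable, bits 3:0 hold log2(element size),
+		 * and bits 12:8 hold log2(FIFO size) - 16.
+		 */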
+ if (in_desc & 0x80) { /* Entry is valid */
+ chan->readable = 1;
+ chan->in_synchronous = !!(in_desc & 0x40);
+ chan->in_seekable = !!(in_desc & 0x20);
+ chan->in_log2_element_size = in_desc & 0x0f;
+ chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
+ }
+
+ /*
+ * A downstream channel should never exist above index 13,
+ * as it would request a nonexistent BULK endpoint > 15.
+ * In the peculiar case that it does, it's ignored silently.
+ */
+
+ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+ chan->writable = 1;
+ chan->out_synchronous = !!(out_desc & 0x40);
+ chan->out_seekable = !!(out_desc & 0x20);
+ chan->out_log2_element_size = out_desc & 0x0f;
+ chan->out_log2_fifo_size =
+ ((out_desc >> 8) & 0x1f) + 16;
+ }
+ }
+
+ return 0;
+}
+
+static int xillyusb_discovery(struct usb_interface *interface)
+{
+ int rc;
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ __le16 bogus_chandesc[2];
+ struct xillyfifo idt_fifo;
+ struct xillyusb_channel *chan;
+ unsigned int idt_len, names_offset;
+ unsigned char *idt;
+ int num_channels;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
+ return rc;
+ }
+
+ /* Phase I: Set up one fake upstream channel and obtain IDT */
+
+ /* Set up a fake IDT with one async IN stream */
+ bogus_chandesc[0] = cpu_to_le16(0x80);
+ bogus_chandesc[1] = cpu_to_le16(0);
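+	/* 0x80: a valid, asynchronous, non-seekable entry, 1-byte elements */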
+
+ rc = setup_channels(xdev, bogus_chandesc, 1);
+
+ if (rc)
+ return rc;
+
+ rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
+
+ if (rc)
+ return rc;
+
+ chan = xdev->channels;
+
+ chan->in_fifo = &idt_fifo;
+ chan->read_data_ok = 1;
+
+ xdev->num_channels = 1;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
+ goto unfifo;
+ }
+
+ rc = wait_event_interruptible_timeout(idt_fifo.waitq,
+ !chan->read_data_ok,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unfifo;
+ }
+
+ if (rc < 0) {
+ rc = -EINTR; /* Interrupt on probe method? Interesting. */
+ goto unfifo;
+ }
+
+ if (chan->read_data_ok) {
+ rc = -ETIMEDOUT;
+ dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
+ goto unfifo;
+ }
+
+ idt_len = READ_ONCE(idt_fifo.fill);
+ idt = kmalloc(idt_len, GFP_KERNEL);
+
+ if (!idt) {
+ rc = -ENOMEM;
+ goto unfifo;
+ }
+
+ fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
+
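+	/*
+	 * The IDT's last 4 bytes hold its CRC32, appended so that a CRC
+	 * calculated over the entire IDT yields zero for an intact copy.
+	 */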
+ if (crc32_le(~0, idt, idt_len) != 0) {
+ dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ if (*idt > 0x90) {
+ dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
+ (int)*idt);
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ /* Phase II: Set up the streams as defined in IDT */
+
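+	/*
+	 * IDT layout, as consumed below: byte 0 is the format version,
+	 * bytes 1-2 hold the number of channels (little endian), followed
+	 * by two 16-bit descriptors per channel, then the channel names,
+	 * with a CRC32 in the last 4 bytes.
+	 */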
+ num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
+ names_offset = 3 + num_channels * 4;
+ idt_len -= 4; /* Exclude CRC */
+
+ if (idt_len < names_offset) {
+ dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ rc = setup_channels(xdev, (void *)idt + 3, num_channels);
+
+ if (rc)
+ goto unidt;
+
+ /*
+ * Except for wildly misbehaving hardware, or if it was disconnected
+ * just after responding with the IDT, there is no reason for any
+	 * work item to be running now. To make sure the update of
+	 * xdev->channels is visible to anything that might run in parallel,
+	 * flush the workqueue, which rarely does anything.
+ */
+ flush_workqueue(xdev->workq);
+
+ xdev->num_channels = num_channels;
+
+ fifo_mem_release(&idt_fifo);
+ kfree(chan);
+
+ rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
+ THIS_MODULE, xdev,
+ idt + names_offset,
+ idt_len - names_offset,
+ num_channels,
+ xillyname, true);
+
+ kfree(idt);
+
+ return rc;
+
+unidt:
+ kfree(idt);
+
+unfifo:
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(&idt_fifo);
+
+ return rc;
+}
+
+static int xillyusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct xillyusb_dev *xdev;
+ int rc;
+
+ xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ kref_init(&xdev->kref);
+ mutex_init(&xdev->process_in_mutex);
+ mutex_init(&xdev->msg_mutex);
+
+ xdev->udev = usb_get_dev(interface_to_usbdev(interface));
+ xdev->dev = &interface->dev;
+ xdev->error = 0;
+ spin_lock_init(&xdev->error_lock);
+ xdev->in_counter = 0;
+ xdev->in_bytes_left = 0;
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+
+ if (!xdev->workq) {
+ dev_err(&interface->dev, "Failed to allocate work queue\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
+
+ usb_set_intfdata(interface, xdev);
+
+ rc = xillyusb_setup_base_eps(xdev);
+ if (rc)
+ goto fail;
+
+ rc = xillyusb_discovery(interface);
+ if (rc)
+ goto latefail;
+
+ return 0;
+
+latefail:
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+fail:
+ usb_set_intfdata(interface, NULL);
+ kref_put(&xdev->kref, cleanup_dev);
+ return rc;
+}
+
+static void xillyusb_disconnect(struct usb_interface *interface)
+{
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &msg_ep->fifo;
+ int rc;
+ int i;
+
+ xillybus_cleanup_chrdev(xdev, &interface->dev);
+
+ /*
+ * Try to send OPCODE_QUIESCE, which will fail silently if the device
+ * was disconnected, but makes sense on module unload.
+ */
+
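+	/* Arrange a wakeup on the FIFO's waitq once the message EP drains */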
+ msg_ep->wake_on_drain = true;
+ xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ /*
+	 * If the device has been disconnected, sending the opcode causes
+	 * a global device error, setting xdev->error (unless such an error
+	 * occurred earlier). Hence timing out means that the USB link is
+	 * fine, but somehow the message wasn't sent. This should never
+	 * happen.
+ */
+
+ rc = wait_event_interruptible_timeout(fifo->waitq,
+ msg_ep->drained || xdev->error,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (!rc)
+ dev_err(&interface->dev,
+ "Weird timeout condition on sending quiesce request.\n");
+
+ report_io_error(xdev, -ENODEV); /* Discourage further activity */
+
+ /*
+ * This device driver is declared with soft_unbind set, or else
+ * sending OPCODE_QUIESCE above would always fail. The price is
+	 * that the USB framework hasn't killed outstanding URBs, so this
+	 * must be done explicitly before returning from this call.
+ */
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ /*
+ * Lock taken to prevent chan->out_ep from changing. It also
+ * ensures xillyusb_open() and xillyusb_flush() don't access
+ * xdev->dev after being nullified below.
+ */
+ mutex_lock(&chan->lock);
+ if (chan->out_ep)
+ endpoint_quiesce(chan->out_ep);
+ mutex_unlock(&chan->lock);
+ }
+
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+ usb_set_intfdata(interface, NULL);
+
+ xdev->dev = NULL;
+
+ kref_put(&xdev->kref, cleanup_dev);
+}
+
+static struct usb_driver xillyusb_driver = {
+ .name = xillyname,
+ .id_table = xillyusb_table,
+ .probe = xillyusb_probe,
+ .disconnect = xillyusb_disconnect,
+ .soft_unbind = 1,
+};
+
+static int __init xillyusb_init(void)
+{
+ int rc = 0;
+
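+	/*
+	 * fifo_buf_order is the page allocation order for FIFO buffers:
+	 * zero (single pages) unless the initial buffer size exceeds one
+	 * page, in which case multi-page allocations are used.
+	 */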
+ if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
+ fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
+ else
+ fifo_buf_order = 0;
+
+ rc = usb_register(&xillyusb_driver);
+
+ return rc;
+}
+
+static void __exit xillyusb_exit(void)
+{
+ usb_deregister(&xillyusb_driver);
+}
+
+module_init(xillyusb_init);
+module_exit(xillyusb_exit);