Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                      |   2
-rw-r--r--  drivers/dma/altera-msgdma.c              |  20
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c  |   3
-rw-r--r--  drivers/dma/fsl-qdma.c                   |   6
-rw-r--r--  drivers/dma/hsu/hsu.c                    |   3
-rw-r--r--  drivers/dma/idxd/cdev.c                  |   3
-rw-r--r--  drivers/dma/idxd/idxd.h                  |  14
-rw-r--r--  drivers/dma/idxd/init.c                  | 100
-rw-r--r--  drivers/dma/idxd/irq.c                   |  27
-rw-r--r--  drivers/dma/idxd/submit.c                |  92
-rw-r--r--  drivers/dma/idxd/sysfs.c                 |   2
-rw-r--r--  drivers/dma/imx-dma.c                    |   2
-rw-r--r--  drivers/dma/imx-sdma.c                   |  58
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c              |   2
-rw-r--r--  drivers/dma/ipu/ipu_irq.c                |   2
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c    |  27
-rw-r--r--  drivers/dma/mpc512x_dma.c                |   1
-rw-r--r--  drivers/dma/of-dma.c                     |   9
-rw-r--r--  drivers/dma/pl330.c                      |   6
-rw-r--r--  drivers/dma/qcom/Kconfig                 |   1
-rw-r--r--  drivers/dma/qcom/gpi.c                   |   1
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c            |  17
-rw-r--r--  drivers/dma/sf-pdma/Kconfig              |   1
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c            |   5
-rw-r--r--  drivers/dma/sh/Makefile                  |   2
-rw-r--r--  drivers/dma/sh/rcar-dmac.c               |   2
-rw-r--r--  drivers/dma/sh/shdma-of.c                |  76
-rw-r--r--  drivers/dma/sh/usb-dmac.c                |   2
-rw-r--r--  drivers/dma/ste_dma40.c                  |   3
-rw-r--r--  drivers/dma/stm32-dma.c                  |   4
-rw-r--r--  drivers/dma/stm32-dmamux.c               |   6
-rw-r--r--  drivers/dma/stm32-mdma.c                 |   4
-rw-r--r--  drivers/dma/sun4i-dma.c                  |   5
-rw-r--r--  drivers/dma/ti/k3-udma.c                 |   1
-rw-r--r--  drivers/dma/ti/omap-dma.c                |   3
-rw-r--r--  drivers/dma/uniphier-xdmac.c             |   4
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c          |  12
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c        |  75
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c          |   2
39 files changed, 418 insertions(+), 187 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ab9d9a488a6..39b5b46e880f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -59,6 +59,7 @@ config DMA_OF
#devices
config ALTERA_MSGDMA
tristate "Altera / Intel mSGDMA Engine"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
config XILINX_ZYNQMP_DPDMA
tristate "Xilinx DPDMA Engine"
+ depends on HAS_IOMEM && OF
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 9a841ce5f0c5..0fe0676f8e1d 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
@@ -888,6 +889,13 @@ static int msgdma_probe(struct platform_device *pdev)
if (ret)
goto fail;
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, dma_dev);
+ if (ret == -EINVAL)
+ dev_warn(&pdev->dev, "device was not probed from DT\n");
+ else if (ret && ret != -ENODEV)
+ goto fail;
+
dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");
return 0;
@@ -908,6 +916,8 @@ static int msgdma_remove(struct platform_device *pdev)
{
struct msgdma_device *mdev = platform_get_drvdata(pdev);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->dmadev);
msgdma_dev_remove(mdev);
@@ -916,9 +926,19 @@ static int msgdma_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id msgdma_match[] = {
+ { .compatible = "altr,socfpga-msgdma", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, msgdma_match);
+#endif
+
static struct platform_driver msgdma_driver = {
.driver = {
.name = "altera-msgdma",
+ .of_match_table = of_match_ptr(msgdma_match),
},
.probe = msgdma_probe,
.remove = msgdma_remove,
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ec909e0b810..4ae057922ef1 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI major version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI minor version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
ppriv->store =
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
if (!ppriv->store) {
+ err = -ENOMEM;
dev_err(dev, "dpaa2_io_store_create() failed\n");
goto err_store;
}
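
All three hunks fix the same bug class: an error branch that jumps to the unwind
label without setting err, so the function returns whatever a previous call left
there (typically 0, turning a failure into a false success). The shape of the
fix, with hypothetical helpers standing in for the dpaa2 calls:

    static int demo_setup(struct device *dev)
    {
        int err;

        err = demo_open(dev);                  /* hypothetical helper */
        if (err)
            return err;

        priv->store = demo_store_create(dev);  /* hypothetical, NULL on failure */
        if (!priv->store) {
            err = -ENOMEM;                     /* without this, err is still 0 */
            dev_err(dev, "store create failed\n");
            goto err_store;
        }
        return 0;

    err_store:
        demo_close(dev);
        return err;
    }
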
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index ed2ab46b15e7..045ead46ec8f 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1235,7 +1235,11 @@ static int fsl_qdma_probe(struct platform_device *pdev)
fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask failure.\n");
+ return ret;
+ }
platform_set_drvdata(pdev, fsl_qdma);
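
dma_set_mask() can legitimately fail (it returns -EIO when the platform cannot
address that many bits), so ignoring its return value risks silently
mis-programmed DMA addressing. This patch simply propagates the error; a common
alternative, shown here only as a hedged variant and not what the patch does, is
to retry with a narrower mask before giving up:

    ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
    if (ret)
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));   /* fallback */
    if (ret) {
        dev_err(&pdev->dev, "no usable DMA mask\n");
        return ret;
    }
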
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 025d8ad5a63c..92caae55aece 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -201,6 +201,7 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status);
*/
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
+ struct dma_chan_percpu *stat;
struct hsu_dma_chan *hsuc;
struct hsu_dma_desc *desc;
unsigned long flags;
@@ -210,6 +211,7 @@ int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
return 0;
hsuc = &chip->hsu->chan[nr];
+ stat = this_cpu_ptr(hsuc->vchan.chan.local);
spin_lock_irqsave(&hsuc->vchan.lock, flags);
desc = hsuc->desc;
@@ -221,6 +223,7 @@ int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
} else {
vchan_cookie_complete(&desc->vdesc);
desc->status = DMA_COMPLETE;
+ stat->bytes_transferred += desc->length;
hsu_dma_start_transfer(hsuc);
}
}
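
struct dma_chan keeps per-CPU transfer statistics in chan->local
(struct dma_chan_percpu), which the dmaengine core exposes through sysfs as
bytes_transferred and memcpy_count. The hunk above updates them from the
completion path; the general accounting pattern, assuming desc->length holds the
completed byte count, is:

    struct dma_chan_percpu *stat = this_cpu_ptr(chan->local);

    stat->bytes_transferred += desc->length;
    stat->memcpy_count++;               /* one completed transaction */
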
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 302cba5ff779..e9def577c697 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
+ rc = -EINVAL;
goto failed;
}
@@ -295,9 +296,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
struct idxd_cdev *idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
- cdev_ctx = &ictx[wq->idxd->data->type];
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 26482c7d4c3a..fc708be7ad9a 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -294,6 +294,14 @@ struct idxd_desc {
struct idxd_wq *wq;
};
+/*
+ * This is a software-defined error for the completion status. We overload an
+ * error code that will never appear in the completion status and is only ever
+ * seen in the SWERR register.
+ */
+enum idxd_completion_status {
+ IDXD_COMP_DESC_ABORT = 0xff,
+};
+
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+ idxd_dma_complete_txd(desc, reason);
+ idxd_free_desc(desc->wq, desc);
+}
+
#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2a926bef87f2..c0f4c0422f32 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
+ idxd_msix_perm_setup(idxd);
+
irq_entry = &idxd->irq_entries[0];
rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
idxd_unmask_error_interrupts(idxd);
- idxd_msix_perm_setup(idxd);
return 0;
err_wq_irqs:
@@ -162,12 +163,39 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
err_misc_irq:
/* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
+ idxd_msix_perm_clear(idxd);
err_irq_entries:
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
}
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt <= 0)
+ return;
+
+ irq_entry = &idxd->irq_entries[0];
+ free_irq(irq_entry->vector, irq_entry);
+
+ for (i = 1; i < msixcnt; i++) {
+
+ irq_entry = &idxd->irq_entries[i];
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+ idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ free_irq(irq_entry->vector, irq_entry);
+ }
+
+ idxd_mask_error_interrupts(idxd);
+ pci_free_irq_vectors(pdev);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -242,6 +270,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine->idxd = idxd;
device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.bus = &dsa_bus_type;
engine->conf_dev.type = &idxd_engine_device_type;
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
@@ -303,6 +332,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ destroy_workqueue(idxd->wq);
+}
+
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -311,7 +353,8 @@ static int idxd_setup_internals(struct idxd_device *idxd)
init_waitqueue_head(&idxd->cmd_waitq);
if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
+ idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
+ dev_to_node(dev));
if (!idxd->int_handles)
return -ENOMEM;
}
@@ -531,12 +574,12 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "Loading RO device config\n");
rc = idxd_device_load_config(idxd);
if (rc < 0)
- goto err;
+ goto err_config;
}
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err;
+ goto err_config;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
@@ -549,6 +592,8 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
+ err_config:
+ idxd_cleanup_internals(idxd);
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
@@ -556,6 +601,18 @@ static int idxd_probe(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ idxd_cleanup_internals(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -608,7 +665,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- goto err;
+ goto err_dev_register;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +675,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ err_dev_register:
+ idxd_cleanup(idxd);
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
@@ -701,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
synchronize_irq(irq_entry->vector);
- free_irq(irq_entry->vector, irq_entry);
if (i == 0)
continue;
idxd_flush_pending_llist(irq_entry);
idxd_flush_work_list(irq_entry);
}
-
- idxd_msix_perm_clear(idxd);
- idxd_release_int_handles(idxd);
- pci_free_irq_vectors(pdev);
- pci_iounmap(pdev, idxd->reg_base);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
+ flush_workqueue(idxd->wq);
}
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i;
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
idxd_unregister_devices(idxd);
- perfmon_pmu_remove(idxd);
+
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ free_irq(irq_entry->vector, irq_entry);
+ }
+ idxd_msix_perm_clear(idxd);
+ idxd_release_int_handles(idxd);
+ pci_free_irq_vectors(pdev);
+ pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ pci_disable_device(pdev);
+ destroy_workqueue(idxd->wq);
+ perfmon_pmu_remove(idxd);
+ device_unregister(&idxd->conf_dev);
}
static struct pci_driver idxd_pci_driver = {
@@ -745,12 +812,12 @@ static int __init idxd_init_module(void)
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We cannot utilize it.
*/
- if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
pr_warn("idxd driver failed to load without MOVDIR64B.\n");
return -ENODEV;
}
- if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
pr_warn("Platform does not have ENQCMD(S) support.\n");
else
support_enqcmd = true;
@@ -787,6 +854,7 @@ module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
+ idxd_unregister_driver();
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index ae68e1e5487a..4e3a7198c0ca 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
return false;
}
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
- idxd_dma_complete_txd(desc, reason);
- idxd_free_desc(desc->wq, desc);
-}
-
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
reason = IDXD_COMPLETE_DEV_FAIL;
llist_for_each_entry_safe(desc, t, head, llnode) {
- if (desc->completion->status) {
- if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+ if (status) {
+ if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ complete_desc(desc, IDXD_COMPLETE_ABORT);
+ (*processed)++;
+ continue;
+ }
+
+ if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
(*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
list_for_each_entry(desc, &flist, list) {
- if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+ if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ complete_desc(desc, IDXD_COMPLETE_ABORT);
+ continue;
+ }
+
+ if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 19afb62abaff..36c9c1a89b7e 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
* Descriptor completion vectors are 1...N for MSIX. We will round
* robin through the N vectors.
*/
- wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
if (!idxd->int_handles) {
desc->hw->int_handle = wq->vec_ptr;
} else {
- desc->vector = wq->vec_ptr;
/*
* int_handles are only for descriptor completion. However for device
* MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+ struct idxd_desc *desc)
+{
+ struct idxd_desc *d, *n;
+
+ lockdep_assert_held(&ie->list_lock);
+ list_for_each_entry_safe(d, n, &ie->work_list, list) {
+ if (d == desc) {
+ list_del(&d->list);
+ return d;
+ }
+ }
+
+ /*
+ * At this point, the desc to be aborted is held by the completion
+ * handler, which has taken it off the pending list but has not yet added
+ * it to the work list. It will be cleaned up by the interrupt handler
+ * when it sees IDXD_COMP_DESC_ABORT as the completion status.
+ */
+ return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+ struct idxd_desc *desc)
+{
+ struct idxd_desc *d, *t, *found = NULL;
+ struct llist_node *head;
+ unsigned long flags;
+
+ desc->completion->status = IDXD_COMP_DESC_ABORT;
+ /*
+ * Grab the list lock so it will block the irq thread handler. This allows
+ * the abort code to locate the descriptor that needs to be aborted.
+ */
+ spin_lock_irqsave(&ie->list_lock, flags);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(d, t, head, llnode) {
+ if (d == desc) {
+ found = desc;
+ continue;
+ }
+ list_add_tail(&d->list, &ie->work_list);
+ }
+ }
+
+ if (!found)
+ found = list_abort_desc(wq, ie, desc);
+ spin_unlock_irqrestore(&ie->list_lock, flags);
+
+ if (found)
+ complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
+ struct idxd_irq_entry *ie = NULL;
void __iomem *portal;
int rc;
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* even on UP because the recipient is a device.
*/
wmb();
+
+ /*
+ * Post the descriptor to the lockless list for the irq_entry
+ * that we designated the descriptor to.
+ */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+ ie = &idxd->irq_entries[desc->vector];
+ llist_add(&desc->llnode, &ie->pending_llist);
+ }
+
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* device is not accepting descriptor at all.
*/
rc = enqcmds(portal, desc->hw);
- if (rc < 0)
+ if (rc < 0) {
+ if (ie)
+ llist_abort_desc(wq, ie, desc);
return rc;
+ }
}
percpu_ref_put(&wq->wq_active);
-
- /*
- * Pending the descriptor to the lockless list for the irq_entry
- * that we designated the descriptor to.
- */
- if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- int vec;
-
- /*
- * If the driver is on host kernel, it would be the value
- * assigned to interrupt handle, which is index for MSIX
- * vector. If it's guest then can't use the int_handle since
- * that is the index to IMS for the entire device. The guest
- * device local index will be used.
- */
- vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
- llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
- }
-
return 0;
}
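
Taken together with the irq.c hunks, this closes a submission/completion race:
the descriptor is now published on the irq_entry's pending llist before the
hardware can see it, and a failed ENQCMDS fishes it back out (or, if the
completion thread already grabbed the list, marks it with the software-only
IDXD_COMP_DESC_ABORT status so the irq thread frees it). Condensed, the final
submit flow is roughly:

    if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
        ie = &idxd->irq_entries[desc->vector];
        llist_add(&desc->llnode, &ie->pending_llist);   /* publish first */
    }

    if (wq_dedicated(wq)) {
        iosubmit_cmds512(portal, desc->hw, 1);
    } else {
        rc = enqcmds(portal, desc->hw);
        if (rc < 0) {
            if (ie)
                llist_abort_desc(wq, ie, desc);         /* un-publish / abort */
            return rc;
        }
    }
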
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 0460d58e3941..bb4df63906a7 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(&group->conf_dev);
}
-
- device_unregister(&idxd->conf_dev);
}
int idxd_register_bus_type(void)
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 7f116bbcfad2..2ddc31e64db0 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg_dma_len(sg);
}
+ imxdma_config_write(chan, &imxdmac->config, direction);
+
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5590c08db51..8070fd664bfc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,7 +35,6 @@
#include <linux/workqueue.h>
#include <asm/irq.h>
-#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -181,6 +180,61 @@
BIT(DMA_MEM_TO_DEV) | \
BIT(DMA_DEV_TO_DEV))
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+ s32 ap_2_ap_addr;
+ s32 ap_2_bp_addr;
+ s32 ap_2_ap_fixed_addr;
+ s32 bp_2_ap_addr;
+ s32 loopback_on_dsp_side_addr;
+ s32 mcu_interrupt_only_addr;
+ s32 firi_2_per_addr;
+ s32 firi_2_mcu_addr;
+ s32 per_2_firi_addr;
+ s32 mcu_2_firi_addr;
+ s32 uart_2_per_addr;
+ s32 uart_2_mcu_addr;
+ s32 per_2_app_addr;
+ s32 mcu_2_app_addr;
+ s32 per_2_per_addr;
+ s32 uartsh_2_per_addr;
+ s32 uartsh_2_mcu_addr;
+ s32 per_2_shp_addr;
+ s32 mcu_2_shp_addr;
+ s32 ata_2_mcu_addr;
+ s32 mcu_2_ata_addr;
+ s32 app_2_per_addr;
+ s32 app_2_mcu_addr;
+ s32 shp_2_per_addr;
+ s32 shp_2_mcu_addr;
+ s32 mshc_2_mcu_addr;
+ s32 mcu_2_mshc_addr;
+ s32 spdif_2_mcu_addr;
+ s32 mcu_2_spdif_addr;
+ s32 asrc_2_mcu_addr;
+ s32 ext_mem_2_ipu_addr;
+ s32 descrambler_addr;
+ s32 dptc_dvfs_addr;
+ s32 utra_addr;
+ s32 ram_code_start_addr;
+ /* End of v1 array */
+ s32 mcu_2_ssish_addr;
+ s32 ssish_2_mcu_addr;
+ s32 hdmi_dma_addr;
+ /* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ s32 mcu_2_ecspi_addr;
+ /* End of v3 array */
+ s32 mcu_2_zqspi_addr;
+ /* End of v4 array */
+};
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
@@ -1829,7 +1883,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
int ret;
ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+ FW_ACTION_UEVENT, fw_name, sdma->dev,
GFP_KERNEL, sdma, sdma_load_firmware);
return ret;
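
FW_ACTION_HOTPLUG was renamed to FW_ACTION_UEVENT in v5.14; the semantics are
unchanged (emit a uevent so userspace can supply the firmware blob). For
reference, the asynchronous completion callback this call expects has roughly
this shape (the real sdma_load_firmware does the script upload as well):

    static void sdma_load_firmware(const struct firmware *fw, void *context)
    {
        struct sdma_engine *sdma = context;

        if (!fw)                /* load failed or was cancelled */
            return;
        /* ... validate fw->data / fw->size, upload the SDMA scripts ... */
        release_firmware(fw);
    }
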
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 104ad420abbe..baab1ca9f621 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_1:
case IDMAC_IC_7:
ipu_channel_set_priority(ipu, channel, true);
+ break;
default:
break;
}
@@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_0:
case IDMAC_SDC_1:
n_desc = 4;
+ break;
default:
break;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 0d5c42f7bfa4..97d9a6f04f2a 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -230,7 +230,7 @@ out:
}
/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
* @source: interrupt source bit position (see ipu_irq_map())
* @return: 0 or negative error code
*/
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 27c07350971d..375e7e647df6 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
- struct dma_chan *chan = vd->tx.chan;
- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
- kfree(c->desc);
+ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
- struct mtk_uart_apdma_desc *d = c->desc;
-
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
}
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
c->rx_status = d->avail_len - cnt;
mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+ struct mtk_uart_apdma_desc *d = c->desc;
+
+ if (d) {
+ list_del(&d->vd.node);
+ vchan_cookie_complete(&d->vd);
+ c->desc = NULL;
+ }
}
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
mtk_uart_apdma_rx_handler(c);
else if (c->dir == DMA_MEM_TO_DEV)
mtk_uart_apdma_tx_handler(c);
+ mtk_uart_apdma_chan_complete_handler(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
return NULL;
/* Now allocate and setup the descriptor */
- d = kzalloc(sizeof(*d), GFP_ATOMIC);
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
return NULL;
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc)) {
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
vd = vchan_next_desc(&c->vc);
c->desc = to_mtk_uart_apdma_desc(&vd->tx);
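
Two independent fixes here: completion is consolidated into one handler that also
clears c->desc (so issue_pending will not start a new transfer while one is still
in flight), and the descriptor allocation switches to GFP_NOWAIT, which the
dmaengine provider documentation asks for in prep callbacks since they may run in
atomic context; unlike GFP_ATOMIC it does not dip into emergency reserves. The
allocation shape:

    /* in a device_prep_slave_sg callback, possibly atomic context */
    d = kzalloc(sizeof(*d), GFP_NOWAIT);
    if (!d)
        return NULL;    /* caller just sees prep failure; no reserves drained */
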
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index c1a69149c8bf..4a51fdbf5aa9 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
case 16:
if (is_mpc8308)
return false;
+ break;
case 1:
case 2:
case 4:
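
Like the ipu_idmac hunks above, this adds a break to a case that previously fell
through into the next label; the fallthrough was harmless in these spots but now
trips -Wimplicit-fallthrough. The explicit form, using the ipu_init_channel hunk
as the example:

    switch (channel) {
    case IDMAC_SDC_0:
    case IDMAC_SDC_1:
        n_desc = 4;
        break;          /* previously fell through into default */
    default:
        break;
    }
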
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index ec00b20ae8e4..ac61ecda2926 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
ofdma_target = of_dma_find_controller(&dma_spec_target);
- if (!ofdma_target)
- return NULL;
+ if (!ofdma_target) {
+ ofdma->dma_router->route_free(ofdma->dma_router->dev,
+ route_data);
+ chan = ERR_PTR(-EPROBE_DEFER);
+ goto err;
+ }
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
}
}
+err:
/*
* Need to put the node back since the ofdma->of_dma_route_allocate
* has taken it for generating the new, translated dma_spec
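
Returning NULL from a router xlate means "no such channel" and the consumer gives
up for good; ERR_PTR(-EPROBE_DEFER) instead tells it to retry once the target DMA
controller has probed, and route_free() must run so the route allocated just
above is not leaked. On the consumer side the error simply propagates out of the
channel request:

    chan = dma_request_chan(dev, "rx");
    if (IS_ERR(chan))   /* -EPROBE_DEFER propagates; probe will be retried */
        return PTR_ERR(chan);
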
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fd8d2bc3be9f..110de8a60058 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
+ unsigned long iflags;
+
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, iflags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
return NULL;
}
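
The bug: pl330_prep_dma_cyclic() reused its `flags` argument, which carries the
caller's DMA_PREP_* flags, as the scratch variable for spin_lock_irqsave(),
clobbering it before it was consumed. The sf-pdma hunk further down fixes the
same mistake. The safe shape, with hypothetical names:

    static struct dma_async_tx_descriptor *
    demo_prep(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
              size_t len, unsigned long flags)    /* DMA_PREP_* bits */
    {
        unsigned long iflags;                     /* separate IRQ-state scratch */

        spin_lock_irqsave(&pool_lock, iflags);
        /* ... descriptor pool manipulation ... */
        spin_unlock_irqrestore(&pool_lock, iflags);

        /* 'flags' still holds the caller's DMA_PREP_* bits here */
        return vchan_tx_prep(&vc, &desc->vd, flags);
    }
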
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 365f94eb3b08..3f926a653bd8 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA Management.
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 43ac3ab23d4c..1a1b7d8458c9 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2282,6 +2282,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sdm845-gpi-dma" },
{ .compatible = "qcom,sm8150-gpi-dma" },
+ { .compatible = "qcom,sm8250-gpi-dma" },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 806ca02c52d7..62026607f3f8 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- return platform_driver_register(&hidma_mgmt_driver);
+ /*
+ * We do not check for return value here, as it is assumed that
+ * platform_driver_register must not fail. The reason for this is that
+ * the (potential) hidma_mgmt_of_populate_channels calls above are not
+ * cleaned up if it does fail, and to do this work is quite
+ * complicated. In particular, various calls of of_address_to_resource,
+ * of_irq_to_resource, platform_device_register_full, of_dma_configure,
+ * and of_msi_configure which then call other functions and so on, must
+ * be cleaned up - this is not a trivial exercise.
+ *
+ * Currently, this module is not intended to be unloaded, and there is
+ * no module_exit function defined which does the needed cleanup. For
+ * this reason, we have to assume success here.
+ */
+ platform_driver_register(&hidma_mgmt_driver);
+ return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
index f8ffa02e279f..ba46a0a15a93 100644
--- a/drivers/dma/sf-pdma/Kconfig
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -1,5 +1,6 @@
config SF_PDMA
tristate "Sifive PDMA controller driver"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index c4c4e8575764..f12606aeff87 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -94,6 +94,7 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
{
struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
struct sf_pdma_desc *desc;
+ unsigned long iflags;
if (chan && (!len || !dest || !src)) {
dev_err(chan->pdma->dma_dev.dev,
@@ -109,10 +110,10 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->vchan.lock, iflags);
chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock_irqrestore(&chan->vchan.lock, iflags);
return desc->async_tx;
}
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 112fbd22bb3f..abdf10341725 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -3,7 +3,7 @@
# DMA Engine Helpers
#
-obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
#
# DMA Controllers
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d530c1bf11d9..6885b3dcd7a9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
return ret;
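
pm_runtime_get_sync() bumps the usage counter even when the resume fails, so a
bare `return ret` on its error path leaks a reference. pm_runtime_resume_and_get()
(v5.10+) drops the counter itself on failure, which is what makes the straight
returns in these conversions (here and in the stm32 and zynqmp hunks below)
correct:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return ret;                     /* counter already balanced */

    /* equivalent open-coded form the old call sites would have needed: */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        pm_runtime_put_noidle(dev);     /* undo the get */
        return ret;
    }
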
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
deleted file mode 100644
index be89dd894328..000000000000
--- a/drivers/dma/sh/shdma-of.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SHDMA Device Tree glue
- *
- * Copyright (C) 2013 Renesas Electronics Inc.
- * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- */
-
-#include <linux/dmaengine.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/shdma-base.h>
-
-#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
-
-static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- u32 id = dma_spec->args[0];
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- if (dma_spec->args_count != 1)
- return NULL;
-
- dma_cap_zero(mask);
- /* Only slave DMA channels can be allocated via DT */
- dma_cap_set(DMA_SLAVE, mask);
-
- chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(uintptr_t)id);
- if (chan)
- to_shdma_chan(chan)->hw_req = id;
-
- return chan;
-}
-
-static int shdma_of_probe(struct platform_device *pdev)
-{
- const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
- int ret;
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- shdma_of_xlate, pdev);
- if (ret < 0)
- return ret;
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
- if (ret < 0)
- of_dma_controller_free(pdev->dev.of_node);
-
- return ret;
-}
-
-static const struct of_device_id shdma_of_match[] = {
- { .compatible = "renesas,shdma-mux", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
-static struct platform_driver shdma_of = {
- .driver = {
- .name = "shdma-of",
- .of_match_table = shdma_of_match,
- },
- .probe = shdma_of_probe,
-};
-
-module_platform_driver(shdma_of);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SH-DMA driver DT glue");
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 8f7ceb698226..1cc06900153e 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
error:
of_dma_controller_free(pdev->dev.of_node);
- pm_runtime_put(&pdev->dev);
error_pm:
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 265d7c07b348..e1827393143f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
+ if (base->lcpa_base)
+ iounmap(base->lcpa_base);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index f54ecb123a52..7dd1d3d0bf06 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
chan->config_init = false;
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
int id, ret, scr;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index ef0d0555103d..a42164389ebc 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
/* Set dma request */
spin_lock_irqsave(&dmamux->lock, flags);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
spin_unlock_irqrestore(&dmamux->lock, flags);
goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 36ba8b43e78d..18cbd1e43c2e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
u32 ccr, id;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e8b6633ae661..93f1645ae928 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1042,9 +1042,8 @@ handle_pending:
* Move the promise into the completed list now that
* we're done with it
*/
- list_del(&vchan->processing->list);
- list_add_tail(&vchan->processing->list,
- &contract->completed_demands);
+ list_move_tail(&vchan->processing->list,
+ &contract->completed_demands);
/*
* Cyclic DMA transfers are special:
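
list_move_tail() is the idiomatic one-call replacement for the del+add pair; in
the kernel it is implemented exactly as:

    static inline void list_move_tail(struct list_head *list,
                                      struct list_head *head)
    {
        __list_del_entry(list);         /* unlink from the current list */
        list_add_tail(list, head);      /* append to the new one */
    }
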
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 96ad21869ba7..a35858610780 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud)
ud->tchan_cnt),
ud->rchan_cnt - bitmap_weight(ud->rchan_map,
ud->rchan_cnt));
+ break;
default:
break;
}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 268a08058714..7cb577e6587b 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1608,7 +1608,8 @@ static int omap_dma_context_notifier(struct notifier_block *nb,
return NOTIFY_BAD;
omap_dma_context_save(od);
break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
case CPU_CLUSTER_PM_EXIT:
omap_dma_context_restore(od);
break;
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 16b19654873d..d6b8a202474f 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
writel(0, xc->reg_ch_base + XDMAC_TSS);
/* wait until transfer is stopped */
- return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
- !(val & XDMAC_STAT_TENF), 100, 1000);
+ return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+ !(val & XDMAC_STAT_TENF), 100, 1000);
}
/* xc->vc.lock must be held by caller */
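
As that context line notes, this function runs with the vchan lock held, so it
must not sleep: readl_poll_timeout() sleeps (usleep_range) between reads, while
readl_poll_timeout_atomic() busy-waits with udelay(). Same arguments, same
semantics otherwise:

    /* poll STAT until TENF clears; 100us between reads, 1000us total budget */
    ret = readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
                                    !(val & XDMAC_STAT_TENF), 100, 1000);
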
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 75c0b8e904e5..4b9530a7bf65 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
* @genlock: Support genlock mode
* @err: Channel has errors
* @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
bool genlock;
bool err;
bool idle;
+ bool terminating;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
+
+ /*
+ * While we ran a callback the user called a terminate function,
+ * which takes care of cleaning up any remaining descriptors
+ */
+ if (chan->terminating)
+ break;
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
if (desc->cyclic)
chan->cyclic = true;
+ chan->terminating = false;
+
spin_unlock_irqrestore(&chan->lock, flags);
return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
+ chan->terminating = true;
xilinx_dma_free_descriptors(chan);
chan->idle = true;
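
The race being closed: the cleanup loop drops chan->lock around each completion
callback, and a callback may invoke dmaengine_terminate_async(), which itself
frees every remaining descriptor; without the flag the loop would continue
walking freed memory. Condensed, the pattern is:

    /* terminate_all side */
    chan->terminating = true;
    xilinx_dma_free_descriptors(chan);

    /* cleanup side, inside the per-descriptor loop */
    spin_unlock_irqrestore(&chan->lock, flags);
    callback(callback_param);           /* may call terminate_all */
    spin_lock_irqsave(&chan->lock, flags);
    if (chan->terminating)
        break;                          /* lists already emptied and freed */
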
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 70b29bd079c9..b280a53e8570 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -113,6 +113,7 @@
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
@@ -530,7 +531,7 @@ static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
for (i = 1; i < num_src_addr; i++) {
u32 *addr = &hw_desc->src_addr2;
- addr[i-1] = lower_32_bits(dma_addr[i]);
+ addr[i - 1] = lower_32_bits(dma_addr[i]);
if (xdev->ext_addr) {
u32 *addr_ext = &hw_desc->addr_ext_23;
@@ -702,8 +703,9 @@ xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
size_t stride = hsize + xt->sgl[0].icg;
if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
- dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
- XILINX_DPDMA_ALIGN_BYTES);
+ dev_err(chan->xdev->dev,
+ "chan%u: buffer should be aligned at %d B\n",
+ chan->id, XILINX_DPDMA_ALIGN_BYTES);
return NULL;
}
@@ -866,7 +868,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
* will be used, but it should be enough.
*/
list_for_each_entry(sw_desc, &desc->descriptors, node)
- sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+ sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
sw_desc = list_first_entry(&desc->descriptors,
struct xilinx_dpdma_sw_desc, node);
@@ -915,7 +918,7 @@ static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
}
/**
- * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
+ * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
* @chan: DPDMA channel
*
* Notify waiters for no outstanding event, so waiters can stop the channel
@@ -934,7 +937,9 @@ static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
cnt = xilinx_dpdma_chan_ostand(chan);
if (cnt) {
- dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+ dev_dbg(chan->xdev->dev,
+ "chan%u: %d outstanding transactions\n",
+ chan->id, cnt);
return -EWOULDBLOCK;
}
@@ -970,8 +975,8 @@ static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
return 0;
}
- dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
- xilinx_dpdma_chan_ostand(chan));
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
if (ret == 0)
return -ETIMEDOUT;
@@ -1005,8 +1010,8 @@ static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
return 0;
}
- dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
- xilinx_dpdma_chan_ostand(chan));
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
return -ETIMEDOUT;
}
@@ -1060,7 +1065,8 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
vchan_cyclic_callback(&active->vdesc);
else
dev_warn(chan->xdev->dev,
- "DONE IRQ with no active descriptor!\n");
+ "chan%u: DONE IRQ with no active descriptor!\n",
+ chan->id);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -1086,13 +1092,18 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
if (!chan->running || !pending)
goto out;
- desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+ desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
/* If the retrigger raced with vsync, retry at the next frame. */
sw_desc = list_first_entry(&pending->descriptors,
struct xilinx_dpdma_sw_desc, node);
- if (sw_desc->hw.desc_id != desc_id)
+ if (sw_desc->hw.desc_id != desc_id) {
+ dev_dbg(chan->xdev->dev,
+ "chan%u: vsync race lost (%u != %u), retrying\n",
+ chan->id, sw_desc->hw.desc_id, desc_id);
goto out;
+ }
/*
* Complete the active descriptor, if any, promote the pending
@@ -1148,10 +1159,12 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
- dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
+ dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
+ chan->id,
dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
- dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
+ dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
+ chan->id,
dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
@@ -1167,7 +1180,8 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
xilinx_dpdma_chan_dump_tx_desc(chan, active);
if (active->error)
- dev_dbg(xdev->dev, "repeated error on desc\n");
+ dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
+ chan->id);
/* Reschedule if there's no new descriptor */
if (!chan->desc.pending &&
@@ -1232,7 +1246,8 @@ static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
align, 0);
if (!chan->desc_pool) {
dev_err(chan->xdev->dev,
- "failed to allocate a descriptor pool\n");
+ "chan%u: failed to allocate a descriptor pool\n",
+ chan->id);
return -ENOMEM;
}
@@ -1459,7 +1474,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
*/
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}
@@ -1585,7 +1600,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
- uint32_t chan_id = dma_spec->args[0];
+ u32 chan_id = dma_spec->args[0];
if (chan_id >= ARRAY_SIZE(xdev->chan))
return NULL;
@@ -1596,6 +1611,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+ void __iomem *reg;
+
+ /* Disable all interrupts */
+ xilinx_dpdma_disable_irq(xdev);
+
+ /* Stop all channels */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * i;
+ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+ }
+
+ /* Clear the interrupt status registers */
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1657,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
if (IS_ERR(xdev->reg))
return PTR_ERR(xdev->reg);
+ dpdma_hw_init(xdev);
+
xdev->irq = platform_get_irq(pdev, 0);
if (xdev->irq < 0) {
dev_err(xdev->dev, "failed to get platform irq\n");
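
Two themes in this file: every message now carries the channel ID, and the 16-bit
DESC_ID register is masked consistently. DMA cookies increase monotonically past
65535, so both the value stored in each software descriptor and the value read
back at vsync must be truncated the same way, or the retrigger comparison starts
failing after ~64k submissions:

    sw_desc->hw.desc_id = desc->vdesc.tx.cookie
                        & XILINX_DPDMA_CH_DESC_ID_MASK;     /* write side */

    desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
            & XILINX_DPDMA_CH_DESC_ID_MASK;                 /* read side */
    if (sw_desc->hw.desc_id != desc_id)
        goto out;       /* retrigger raced with vsync; retry next frame */

dpdma_hw_init() additionally puts the engine into a known state at probe (all
interrupts masked and acked, all channels disabled) so stale bootloader state
cannot fire interrupts before the driver is ready.
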
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d8419565b92c..5fecf5aa6e85 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
struct zynqmp_dma_desc_sw *desc;
int i, ret;
- ret = pm_runtime_get_sync(chan->dev);
+ ret = pm_runtime_resume_and_get(chan->dev);
if (ret < 0)
return ret;