Diffstat (limited to 'drivers/media/pci/intel/ipu3/ipu3-cio2.c')
-rw-r--r--  drivers/media/pci/intel/ipu3/ipu3-cio2.c | 159
1 file changed, 61 insertions(+), 98 deletions(-)
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 92f5eadf2c99..4e598e937dfe 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Intel Corporation
+ * Copyright (C) 2017,2020 Intel Corporation
  *
  * Based partially on Intel IPU4 driver written by
  *  Sakari Ailus <[email protected]>
@@ -9,13 +9,14 @@
  *  Jouni Ukkonen <[email protected]>
  *  Antti Laakso <[email protected]>
  * et al.
- *
  */
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/vmalloc.h>
@@ -96,12 +97,12 @@ static inline u32 cio2_bytesperline(const unsigned int width)
 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
 {
 	if (cio2->dummy_lop) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
 		cio2->dummy_lop = NULL;
 	}
 	if (cio2->dummy_page) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 				  cio2->dummy_page, cio2->dummy_page_bus_addr);
 		cio2->dummy_page = NULL;
 	}
@@ -111,12 +112,10 @@ static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
 {
 	unsigned int i;
 
-	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
-					      CIO2_PAGE_SIZE,
+	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					      &cio2->dummy_page_bus_addr,
 					      GFP_KERNEL);
-	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
-					     CIO2_PAGE_SIZE,
+	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					     &cio2->dummy_lop_bus_addr,
 					     GFP_KERNEL);
 	if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -127,8 +126,8 @@ static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
 	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
 	 * Initialize each entry to dummy_page bus base address.
 	 */
-	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
-		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
 
 	return 0;
 }
@@ -160,12 +159,11 @@ static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
 	unsigned int i;
 
 	entry[0].first_entry.first_page_offset = 0;
-	entry[1].second_entry.num_of_pages =
-		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
-	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
+	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
 
 	for (i = 0; i < CIO2_MAX_LOPS; i++)
-		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -182,26 +180,24 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
 	unsigned int i;
 	entry[0].first_entry.first_page_offset = b->offset;
 	remaining = length + entry[0].first_entry.first_page_offset;
-	entry[1].second_entry.num_of_pages =
-		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
+	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
 	/*
 	 * last_page_available_bytes has the offset of the last byte in the
 	 * last page which is still accessible by DMA. DMA cannot access
 	 * beyond this point. Valid range for this is from 0 to 4095.
 	 * 0 indicates 1st byte in the page is DMA accessible.
-	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
+	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
 	 * is available for DMA transfer.
 	 */
 	entry[1].second_entry.last_page_available_bytes =
 			(remaining & ~PAGE_MASK) ?
-				(remaining & ~PAGE_MASK) - 1 :
-				CIO2_PAGE_SIZE - 1;
+				(remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
 	/* Fill FBPT */
 	remaining = length;
 	i = 0;
 	while (remaining > 0) {
-		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
-		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
+		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
 		entry++;
 		i++;
 	}
@@ -209,7 +205,7 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
 	/*
 	 * The first not meaningful FBPT entry should point to a valid LOP
 	 */
-	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -295,7 +291,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
 			       struct cio2_csi2_timing *timing)
 {
 	struct device *dev = &cio2->pci_dev->dev;
-	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
+	struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
 	struct v4l2_ctrl *link_freq;
 	s64 freq;
 	int r;
@@ -475,8 +471,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
 	}
 
 	/* Enable DMA */
-	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
-	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
 
 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
@@ -512,8 +507,10 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
 
 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	void __iomem *base = cio2->base;
-	unsigned int i, maxloops = 1000;
+	void __iomem *const base = cio2->base;
+	unsigned int i;
+	u32 value;
+	int ret;
 
 	/* Disable CSI receiver and MIPI backend devices */
 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -523,13 +520,10 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 
 	/* Halt DMA */
 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
-	do {
-		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
-		    CIO2_CDMAC0_DMA_HALTED)
-			break;
-		usleep_range(1000, 2000);
-	} while (--maxloops);
-	if (!maxloops)
+	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+				 value, value & CIO2_CDMAC0_DMA_HALTED,
+				 4000, 2000000);
+	if (ret)
 		dev_err(&cio2->pci_dev->dev,
 			"DMA %i can not be halted\n", CIO2_DMA_CHAN);
 
@@ -545,7 +539,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
 {
 	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_queue *q = cio2->cur_queue;
-	int buffers_found = 0;
+	struct cio2_fbpt_entry *entry;
 	u64 ns = ktime_get_ns();
 
 	if (dma_chan >= CIO2_QUEUES) {
@@ -553,15 +547,18 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
 		return;
 	}
 
+	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+		dev_warn(&cio2->pci_dev->dev,
+			 "no ready buffers found on DMA channel %u\n",
+			 dma_chan);
+		return;
+	}
+
 	/* Find out which buffer(s) are ready */
 	do {
-		struct cio2_fbpt_entry *const entry =
-			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
 		struct cio2_buffer *b;
 
-		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
-			break;
-
 		b = q->bufs[q->bufs_first];
 		if (b) {
 			unsigned int bytes = entry[1].second_entry.num_of_bytes;
@@ -583,13 +580,8 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
 		atomic_inc(&q->frame_sequence);
 		cio2_fbpt_entry_init_dummy(cio2, entry);
 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
-		buffers_found++;
-	} while (1);
-
-	if (buffers_found == 0)
-		dev_warn(&cio2->pci_dev->dev,
-			 "no ready buffers found on DMA channel %u\n",
-			 dma_chan);
+		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
 }
 
 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
@@ -841,13 +833,11 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_buffer *b = container_of(vb, struct cio2_buffer,
 					     vbb.vb2_buf);
-	static const unsigned int entries_per_page =
-		CIO2_PAGE_SIZE / sizeof(u32);
-	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
-	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
+	unsigned int pages = PFN_UP(vb->planes[0].length);
+	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
 	struct sg_table *sg;
 	struct sg_dma_page_iter sg_iter;
-	int i, j;
+	unsigned int i, j;
 
 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
@@ -858,7 +848,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 	memset(b->lop, 0, sizeof(b->lop));
 	/* Allocate LOP table */
 	for (i = 0; i < lops; i++) {
-		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
+		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
 					       &b->lop_bus_addr[i], GFP_KERNEL);
 		if (!b->lop[i])
 			goto fail;
@@ -873,23 +863,22 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 	b->offset = sg->sgl->offset;
 
 	i = j = 0;
-	for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
+	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
 		if (!pages--)
 			break;
-		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
+		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
 		j++;
-		if (j == entries_per_page) {
+		if (j == CIO2_LOP_ENTRIES) {
 			i++;
 			j = 0;
 		}
 	}
 
-	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
 	return 0;
 fail:
-	for (i--; i >= 0; i--)
-		dma_free_coherent(dev, CIO2_PAGE_SIZE,
-				  b->lop[i], b->lop_bus_addr[i]);
+	while (i--)
+		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
 	return -ENOMEM;
 }
 
@@ -979,7 +968,7 @@ static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
 	/* Free LOP table */
 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
 		if (b->lop[i])
-			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+			dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					  b->lop[i], b->lop_bus_addr[i]);
 	}
 }
@@ -1633,7 +1622,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	if (r) {
 		dev_err(&cio2->pci_dev->dev,
 			"failed to initialize videobuf2 queue (%d)\n", r);
-		goto fail_vbq;
+		goto fail_subdev;
 	}
 
 	/* Initialize vdev */
@@ -1664,10 +1653,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	return 0;
 
 fail_link:
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 fail_vdev:
-	vb2_queue_release(vbq);
-fail_vbq:
 	v4l2_device_unregister_subdev(subdev);
 fail_subdev:
 	media_entity_cleanup(&vdev->entity);
@@ -1683,9 +1670,8 @@ fail_fbpt:
 
 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 	media_entity_cleanup(&q->vdev.entity);
-	vb2_queue_release(&q->vbq);
 	v4l2_device_unregister_subdev(&q->subdev);
 	media_entity_cleanup(&q->subdev.entity);
 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
@@ -1721,29 +1707,10 @@
 
 /**************** PCI interface ****************/
 
-static int cio2_pci_config_setup(struct pci_dev *dev)
-{
-	u16 pci_command;
-	int r = pci_enable_msi(dev);
-
-	if (r) {
-		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
-		return r;
-	}
-
-	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
-	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-		       PCI_COMMAND_INTX_DISABLE;
-	pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
-	return 0;
-}
-
 static int cio2_pci_probe(struct pci_dev *pci_dev,
 			  const struct pci_device_id *id)
 {
 	struct cio2_device *cio2;
-	void __iomem *const *iomap;
 	int r;
 
 	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
@@ -1766,13 +1733,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
 		return -ENODEV;
 	}
 
-	iomap = pcim_iomap_table(pci_dev);
-	if (!iomap) {
-		dev_err(&pci_dev->dev, "failed to iomap table\n");
-		return -ENODEV;
-	}
-
-	cio2->base = iomap[CIO2_PCI_BAR];
+	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
 
 	pci_set_drvdata(pci_dev, cio2);
 
@@ -1784,9 +1745,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
 		return -ENODEV;
 	}
 
-	r = cio2_pci_config_setup(pci_dev);
-	if (r)
-		return -ENODEV;
+	r = pci_enable_msi(pci_dev);
+	if (r) {
+		dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+		return r;
+	}
 
 	r = cio2_fbpt_init_dummy(cio2);
 	if (r)
@@ -2012,8 +1975,8 @@ static int __maybe_unused cio2_suspend(struct device *dev)
 static int __maybe_unused cio2_resume(struct device *dev)
 {
 	struct cio2_device *cio2 = dev_get_drvdata(dev);
-	int r = 0;
 	struct cio2_queue *q = cio2->cur_queue;
+	int r;
 
 	dev_dbg(dev, "cio2 resume\n");
 	if (!cio2->streaming)
@@ -2040,7 +2003,7 @@ static const struct dev_pm_ops cio2_pm_ops = {
 
 static const struct pci_device_id cio2_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
-	{ 0 }
+	{ }
 };
 
 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
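
The CIO2_PAGE_SIZE conversions above are behavior-preserving. The driver's private 4 KiB constant gives way to the generic PAGE_SIZE, and the open-coded shifts to the helpers from the newly included <linux/pfn.h>, which are defined as:

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

Hence PFN_UP(remaining) yields the same page count as the removed DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE), and PFN_DOWN(addr) is exactly addr >> PAGE_SHIFT. Similarly, CIO2_LOP_ENTRIES names the PAGE_SIZE / sizeof(u32) = 1024 pointers that fit in one list-of-pointers (LOP) page, replacing the equivalent expressions spelled out at each use site.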
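
The DMA-halt change in cio2_hw_exit() replaces an open-coded polling loop with readl_poll_timeout() from the newly included <linux/iopoll.h>. A minimal sketch of that pattern, reusing the register and parameters from the hunk (the wait_for_dma_halt() wrapper is hypothetical, not part of the driver):

#include <linux/iopoll.h>

/*
 * readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) re-reads the
 * register at addr into val until cond is true or timeout_us expires,
 * sleeping roughly sleep_us between reads.  It returns 0 on success and
 * -ETIMEDOUT otherwise, which removes the manual maxloops bookkeeping of
 * the deleted do/while loop.
 */
static int wait_for_dma_halt(void __iomem *base)
{
	u32 value;

	/* Poll CDMAC0 about every 4 ms, giving up after 2 s, as in the hunk. */
	return readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
				  value, value & CIO2_CDMAC0_DMA_HALTED,
				  4000, 2000000);
}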
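
The error path of cio2_vb2_buf_init() changes shape because i is now unsigned int: the removed for (i--; i >= 0; i--) can never terminate once the index is unsigned (i >= 0 is always true), whereas while (i--) frees exactly the entries allocated before the failure and then stops. A self-contained sketch of the idiom, assuming a hypothetical alloc_lop_pages() helper (not driver API):

/* Allocate n DMA-coherent pages, unwinding on failure; safe for unsigned i. */
static int alloc_lop_pages(struct device *dev, u32 *lop[], dma_addr_t bus[],
			   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		lop[i] = dma_alloc_coherent(dev, PAGE_SIZE, &bus[i],
					    GFP_KERNEL);
		if (!lop[i])
			goto fail;
	}
	return 0;

fail:
	/* i indexes the slot that failed; free slots i-1 down to 0. */
	while (i--)
		dma_free_coherent(dev, PAGE_SIZE, lop[i], bus[i]);
	return -ENOMEM;
}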