Diffstat (limited to 'drivers/iio/buffer/industrialio-buffer-dmaengine.c')
 drivers/iio/buffer/industrialio-buffer-dmaengine.c | 134 ++++++++++++++------
 1 file changed, 94 insertions(+), 40 deletions(-)
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index a18c1da292af..12aa1412dfa0 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -64,15 +64,63 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction dma_dir;
+	struct scatterlist *sgl;
+	struct dma_vec *vecs;
+	size_t max_size;
 	dma_cookie_t cookie;
+	size_t len_total;
+	unsigned int i;
+	int nents;
 
-	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-	block->bytes_used = round_down(block->bytes_used,
-			dmaengine_buffer->align);
+	max_size = min(block->size, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
 
-	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
-		DMA_PREP_INTERRUPT);
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+		dma_dir = DMA_DEV_TO_MEM;
+	else
+		dma_dir = DMA_MEM_TO_DEV;
+
+	if (block->sg_table) {
+		sgl = block->sg_table->sgl;
+		nents = sg_nents_for_len(sgl, block->bytes_used);
+		if (nents < 0)
+			return nents;
+
+		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
+		if (!vecs)
+			return -ENOMEM;
+
+		len_total = block->bytes_used;
+
+		for (i = 0; i < nents; i++) {
+			vecs[i].addr = sg_dma_address(sgl);
+			vecs[i].len = min(sg_dma_len(sgl), len_total);
+			len_total -= vecs[i].len;
+
+			sgl = sg_next(sgl);
+		}
+
+		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
+							 vecs, nents, dma_dir,
+							 DMA_PREP_INTERRUPT);
+		kfree(vecs);
+	} else {
+		max_size = min(block->size, dmaengine_buffer->max_size);
+		max_size = round_down(max_size, dmaengine_buffer->align);
+
+		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+			block->bytes_used = max_size;
+
+		if (!block->bytes_used || block->bytes_used > max_size)
+			return -EINVAL;
+
+		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+						   block->phys_addr,
+						   block->bytes_used,
+						   dma_dir,
+						   DMA_PREP_INTERRUPT);
+	}
 
 	if (!desc)
 		return -ENOMEM;
@@ -112,14 +160,23 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
 	.read = iio_dma_buffer_read,
+	.write = iio_dma_buffer_write,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
 	.enable = iio_dma_buffer_enable,
 	.disable = iio_dma_buffer_disable,
-	.data_available = iio_dma_buffer_data_available,
+	.data_available = iio_dma_buffer_usage,
+	.space_available = iio_dma_buffer_usage,
 	.release = iio_dmaengine_buffer_release,
 
+	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
+	.lock_queue = iio_dma_buffer_lock_queue,
+	.unlock_queue = iio_dma_buffer_unlock_queue,
+
 	.modes = INDIO_BUFFER_HARDWARE,
 	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 };
@@ -159,7 +216,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	const char *channel)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
@@ -210,7 +267,6 @@ err_free:
 	kfree(dmaengine_buffer);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -230,66 +286,64 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
-{
-	iio_dmaengine_buffer_free(buffer);
-}
-
-/**
- * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
- * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
- *
- * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
- *
- * The buffer will be automatically de-allocated once the device gets destroyed.
- */
-static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 	int ret;
 
 	buffer = iio_dmaengine_buffer_alloc(dev, channel);
 	if (IS_ERR(buffer))
-		return buffer;
+		return ERR_CAST(buffer);
 
-	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
-			buffer);
-	if (ret)
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	buffer->direction = dir;
+
+	ret = iio_device_attach_buffer(indio_dev, buffer);
+	if (ret) {
+		iio_dmaengine_buffer_free(buffer);
 		return ERR_PTR(ret);
+	}
 
 	return buffer;
 }
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
+
+static void __devm_iio_dmaengine_buffer_free(void *buffer)
+{
+	iio_dmaengine_buffer_free(buffer);
+}
 
 /**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
  * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 
-	buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
+	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-	return iio_device_attach_buffer(indio_dev, buffer);
+	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+					buffer);
 }
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
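
For drivers converting to the renamed helper, the call site changes little. Below is a minimal sketch (not part of this patch) of a probe routine attaching an output (TX) buffer with devm_iio_dmaengine_buffer_setup_ext(); the "foo_dac" driver name, the "tx" channel label, and the omission of channel specs and info ops are illustrative assumptions.

/*
 * Hypothetical usage sketch: a platform driver attaching a DMA
 * output buffer with the new direction-aware helper. Channel
 * definitions and iio_info callbacks are elided for brevity.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

static int foo_dac_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->name = "foo-dac";

	/*
	 * Request the "tx" DMA channel and attach an output buffer.
	 * Blocks filled by userspace are submitted MEM_TO_DEV; the
	 * devm action frees the buffer automatically on detach.
	 */
	ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev,
						  "tx",
						  IIO_BUFFER_DIRECTION_OUT);
	if (ret)
		return ret;

	return devm_iio_device_register(&pdev->dev, indio_dev);
}

Existing capture-only callers are expected to keep the old three-argument devm_iio_dmaengine_buffer_setup() spelling via a wrapper defaulting to IIO_BUFFER_DIRECTION_IN in the companion header change, which is outside this diffstat.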