author     Mark Brown <broonie@kernel.org>	2016-11-04 12:16:38 -0600
committer  Mark Brown <broonie@kernel.org>	2016-11-04 12:16:38 -0600
commit     cc9b94029e9ef51787af908e9856b1eed314bc00
tree       9675310b89d0f6fb1f7bd9423f0638c4ee5226fd /drivers/spi/spi.c
parent     13bed58ce8748d430a26e353a09b89f9d613a71f
parent     1b5b42216469b05ef4b5916cb40b127dfab1da88
Merge branch 'topic/error' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator into regulator-fixed
Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c  211
1 file changed, 170 insertions(+), 41 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77e6e45951f4..5787b723b593 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <linux/ioport.h>
 #include <linux/acpi.h>
+#include <linux/highmem.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/spi.h>
@@ -622,6 +623,8 @@ void spi_unregister_device(struct spi_device *spi)
 	if (spi->dev.of_node)
 		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+	if (ACPI_COMPANION(&spi->dev))
+		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 	device_unregister(&spi->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -707,6 +710,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+#ifdef CONFIG_HIGHMEM
+	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
+				(unsigned long)buf < (PKMAP_BASE +
+				(LAST_PKMAP * PAGE_SIZE)));
+#else
+	const bool kmap_buf = false;
+#endif
 	int desc_len;
 	int sgs;
 	struct page *vm_page;
@@ -714,7 +724,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	size_t min;
 	int i, ret;
 
-	if (vmalloced_buf) {
+	if (vmalloced_buf || kmap_buf) {
 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 	} else if (virt_addr_valid(buf)) {
@@ -730,10 +740,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	for (i = 0; i < sgs; i++) {
 
-		if (vmalloced_buf) {
+		if (vmalloced_buf || kmap_buf) {
 			min = min_t(size_t, len,
 				    desc_len - offset_in_page(buf));
-			vm_page = vmalloc_to_page(buf);
+			if (vmalloced_buf)
+				vm_page = vmalloc_to_page(buf);
+			else
+				vm_page = kmap_to_page(buf);
 			if (!vm_page) {
 				sg_free_table(sgt);
 				return -ENOMEM;
@@ -849,6 +862,20 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 	return 0;
 }
 #else /* !CONFIG_HAS_DMA */
+static inline int spi_map_buf(struct spi_master *master,
+			      struct device *dev, struct sg_table *sgt,
+			      void *buf, size_t len,
+			      enum dma_data_direction dir)
+{
+	return -EINVAL;
+}
+
+static inline void spi_unmap_buf(struct spi_master *master,
+				 struct device *dev, struct sg_table *sgt,
+				 enum dma_data_direction dir)
+{
+}
+
 static inline int __spi_map_msg(struct spi_master *master,
 				struct spi_message *msg)
 {
@@ -944,7 +971,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 	struct spi_transfer *xfer;
 	bool keep_cs = false;
 	int ret = 0;
-	unsigned long ms = 1;
+	unsigned long long ms = 1;
 	struct spi_statistics *statm = &master->statistics;
 	struct spi_statistics *stats = &msg->spi->statistics;
@@ -975,9 +1002,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 			if (ret > 0) {
 				ret = 0;
-				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+				ms = 8LL * 1000LL * xfer->len;
+				do_div(ms, xfer->speed_hz);
 				ms += ms + 100; /* some tolerance */
 
+				if (ms > UINT_MAX)
+					ms = UINT_MAX;
+
 				ms = wait_for_completion_timeout(&master->xfer_completion,
 								 msecs_to_jiffies(ms));
 			}
@@ -1055,7 +1086,6 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * __spi_pump_messages - function which processes spi message queue
  * @master: master to process queue for
  * @in_kthread: true if we are in the context of the message pump thread
- * @bus_locked: true if the bus mutex is held when calling this function
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
@@ -1065,8 +1095,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
-				bool bus_locked)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 {
 	unsigned long flags;
 	bool was_busy = false;
@@ -1083,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
 	/* If another context is idling the device then defer */
 	if (master->idling) {
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
@@ -1097,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			queue_kthread_work(&master->kworker,
+			kthread_queue_work(&master->kworker,
 					   &master->pump_messages);
 			spin_unlock_irqrestore(&master->queue_lock, flags);
 			return;
@@ -1138,11 +1167,14 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
 	master->busy = true;
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	mutex_lock(&master->io_mutex);
+
 	if (!was_busy && master->auto_runtime_pm) {
 		ret = pm_runtime_get_sync(master->dev.parent);
 		if (ret < 0) {
 			dev_err(&master->dev, "Failed to power device: %d\n",
 				ret);
+			mutex_unlock(&master->io_mutex);
 			return;
 		}
 	}
@@ -1158,13 +1190,11 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
 			if (master->auto_runtime_pm)
 				pm_runtime_put(master->dev.parent);
+			mutex_unlock(&master->io_mutex);
 			return;
 		}
 	}
 
-	if (!bus_locked)
-		mutex_lock(&master->bus_lock_mutex);
-
 	trace_spi_message_start(master->cur_msg);
 
 	if (master->prepare_message) {
@@ -1194,8 +1224,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
 	}
 
 out:
-	if (!bus_locked)
-		mutex_unlock(&master->bus_lock_mutex);
+	mutex_unlock(&master->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
 	if (!ret)
@@ -1211,7 +1240,7 @@ static void spi_pump_messages(struct kthread_work *work)
 	struct spi_master *master =
 		container_of(work, struct spi_master, pump_messages);
 
-	__spi_pump_messages(master, true, master->bus_lock_flag);
+	__spi_pump_messages(master, true);
 }
@@ -1221,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master)
 	master->running = false;
 	master->busy = false;
 
-	init_kthread_worker(&master->kworker);
+	kthread_init_worker(&master->kworker);
 	master->kworker_task = kthread_run(kthread_worker_fn,
 					   &master->kworker, "%s",
 					   dev_name(&master->dev));
@@ -1229,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master)
 		dev_err(&master->dev, "failed to create message pump task\n");
 		return PTR_ERR(master->kworker_task);
 	}
-	init_kthread_work(&master->pump_messages, spi_pump_messages);
+	kthread_init_work(&master->pump_messages, spi_pump_messages);
 
 	/*
 	 * Master config will indicate if this controller should run the
@@ -1302,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master)
 	spin_lock_irqsave(&master->queue_lock, flags);
 	master->cur_msg = NULL;
 	master->cur_msg_prepared = false;
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1328,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master)
 	master->cur_msg = NULL;
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	return 0;
 }
@@ -1375,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master)
 	ret = spi_stop_queue(master);
 
 	/*
-	 * flush_kthread_worker will block until all work is done.
+	 * kthread_flush_worker will block until all work is done.
 	 * If the reason that stop_queue timed out is that the work will never
 	 * finish, then it does no good to call flush/stop thread, so
 	 * return anyway.
@@ -1385,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master)
 		return ret;
 	}
 
-	flush_kthread_worker(&master->kworker);
+	kthread_flush_worker(&master->kworker);
 	kthread_stop(master->kworker_task);
 
 	return 0;
@@ -1409,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 	list_add_tail(&msg->queue, &master->queue);
 	if (!master->busy && need_pump)
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
@@ -1646,18 +1675,15 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 	return 1;
 }
 
-static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
-				       void *data, void **return_value)
+static acpi_status acpi_register_spi_device(struct spi_master *master,
+					    struct acpi_device *adev)
 {
-	struct spi_master *master = data;
 	struct list_head resource_list;
-	struct acpi_device *adev;
 	struct spi_device *spi;
 	int ret;
 
-	if (acpi_bus_get_device(handle, &adev))
-		return AE_OK;
-	if (acpi_bus_get_status(adev) || !adev->status.present)
+	if (acpi_bus_get_status(adev) || !adev->status.present ||
+	    acpi_device_enumerated(adev))
 		return AE_OK;
 
 	spi = spi_alloc_device(master);
@@ -1683,6 +1709,8 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
 	if (spi->irq < 0)
 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
 
+	acpi_device_set_enumerated(adev);
+
 	adev->power.flags.ignore_parent = true;
 	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
 	if (spi_add_device(spi)) {
@@ -1695,6 +1723,18 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
 	return AE_OK;
 }
 
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+				       void *data, void **return_value)
+{
+	struct spi_master *master = data;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	return acpi_register_spi_device(master, adev);
+}
+
 static void acpi_register_spi_devices(struct spi_master *master)
 {
 	acpi_status status;
@@ -1873,6 +1913,7 @@ int spi_register_master(struct spi_master *master)
 	spin_lock_init(&master->queue_lock);
 	spin_lock_init(&master->bus_lock_spinlock);
 	mutex_init(&master->bus_lock_mutex);
+	mutex_init(&master->io_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
 	if (!master->max_dma_len)
@@ -2725,6 +2766,7 @@ int spi_flash_read(struct spi_device *spi,
 
 {
 	struct spi_master *master = spi->master;
+	struct device *rx_dev = NULL;
 	int ret;
 
 	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
@@ -2750,9 +2792,24 @@ int spi_flash_read(struct spi_device *spi,
 			return ret;
 		}
 	}
+
 	mutex_lock(&master->bus_lock_mutex);
+	mutex_lock(&master->io_mutex);
+	if (master->dma_rx) {
+		rx_dev = master->dma_rx->device->dev;
+		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
+				  msg->buf, msg->len,
+				  DMA_FROM_DEVICE);
+		if (!ret)
+			msg->cur_msg_mapped = true;
+	}
 	ret = master->spi_flash_read(spi, msg);
+	if (msg->cur_msg_mapped)
+		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
+			      DMA_FROM_DEVICE);
+	mutex_unlock(&master->io_mutex);
 	mutex_unlock(&master->bus_lock_mutex);
+
 	if (master->auto_runtime_pm)
 		pm_runtime_put(master->dev.parent);
@@ -2772,8 +2829,7 @@ static void spi_complete(void *arg)
 {
 	complete(arg);
 }
 
-static int __spi_sync(struct spi_device *spi, struct spi_message *message,
-		      int bus_locked)
+static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
@@ -2791,9 +2847,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
 
-	if (!bus_locked)
-		mutex_lock(&master->bus_lock_mutex);
-
 	/* If we're not using the legacy transfer method then we will
 	 * try to transfer in the calling context so special case.
 	 * This code would be less tricky if we could remove the
@@ -2811,9 +2864,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 		status = spi_async_locked(spi, message);
 	}
 
-	if (!bus_locked)
-		mutex_unlock(&master->bus_lock_mutex);
-
 	if (status == 0) {
 		/* Push out the messages in the calling context if we
 		 * can.
@@ -2823,7 +2873,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 						       spi_sync_immediate);
 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
 						       spi_sync_immediate);
-			__spi_pump_messages(master, false, bus_locked);
+			__spi_pump_messages(master, false);
 		}
 
 		wait_for_completion(&done);
@@ -2856,7 +2906,13 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	return __spi_sync(spi, message, spi->master->bus_lock_flag);
+	int ret;
+
+	mutex_lock(&spi->master->bus_lock_mutex);
+	ret = __spi_sync(spi, message);
+	mutex_unlock(&spi->master->bus_lock_mutex);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_sync);
@@ -2878,7 +2934,7 @@ EXPORT_SYMBOL_GPL(spi_sync);
  */
 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
 {
-	return __spi_sync(spi, message, 1);
+	return __spi_sync(spi, message);
 }
 EXPORT_SYMBOL_GPL(spi_sync_locked);
@@ -3107,6 +3163,77 @@ static struct notifier_block spi_of_notifier = {
 extern struct notifier_block spi_of_notifier;
 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
 
+#if IS_ENABLED(CONFIG_ACPI)
+static int spi_acpi_master_match(struct device *dev, const void *data)
+{
+	return ACPI_COMPANION(dev->parent) == data;
+}
+
+static int spi_acpi_device_match(struct device *dev, void *data)
+{
+	return ACPI_COMPANION(dev) == data;
+}
+
+static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
+{
+	struct device *dev;
+
+	dev = class_find_device(&spi_master_class, NULL, adev,
+				spi_acpi_master_match);
+	if (!dev)
+		return NULL;
+
+	return container_of(dev, struct spi_master, dev);
+}
+
+static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
+
+	return dev ? to_spi_device(dev) : NULL;
+}
+
+static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
+			   void *arg)
+{
+	struct acpi_device *adev = arg;
+	struct spi_master *master;
+	struct spi_device *spi;
+
+	switch (value) {
+	case ACPI_RECONFIG_DEVICE_ADD:
+		master = acpi_spi_find_master_by_adev(adev->parent);
+		if (!master)
+			break;
+
+		acpi_register_spi_device(master, adev);
+		put_device(&master->dev);
+		break;
+	case ACPI_RECONFIG_DEVICE_REMOVE:
+		if (!acpi_device_enumerated(adev))
+			break;
+
+		spi = acpi_spi_find_device_by_adev(adev);
+		if (!spi)
+			break;
+
+		spi_unregister_device(spi);
+		put_device(&spi->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block spi_acpi_notifier = {
+	.notifier_call = acpi_spi_notify,
+};
+#else
+extern struct notifier_block spi_acpi_notifier;
+#endif
+
 static int __init spi_init(void)
 {
 	int status;
@@ -3127,6 +3254,8 @@ static int __init spi_init(void)
 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+	if (IS_ENABLED(CONFIG_ACPI))
+		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
 
 	return 0;